/** Max number of characters in LPM name. */
#define RTE_LPM_NAMESIZE                32

/** Maximum depth value possible for IPv4 LPM. */
#define RTE_LPM_MAX_DEPTH               32

/** @internal Total number of tbl24 entries. */
#define RTE_LPM_TBL24_NUM_ENTRIES       (1 << 24)

/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES  256

/** @internal Max number of tbl8 groups in the tbl8. */
#define RTE_LPM_MAX_TBL8_NUM_GROUPS     (1 << 24)

/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS         256

/** @internal Total number of tbl8 entries. */
#define RTE_LPM_TBL8_NUM_ENTRIES        (RTE_LPM_TBL8_NUM_GROUPS * \
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
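/*
 * Illustrative sketch (not part of this header): sizing and creating a table
 * with the defaults above. rte_lpm_config and SOCKET_ID_ANY are standard DPDK
 * symbols; the table name and rule count are arbitrary example values.
 *
 *	struct rte_lpm_config config = {
 *		.max_rules = 1024,
 *		.number_tbl8s = RTE_LPM_TBL8_NUM_GROUPS,
 *		.flags = 0,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY,
 *			&config);
 *	if (lpm == NULL)
 *		rte_panic("LPM creation failed\n");
 */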
/** @internal Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \
	if (cond) return (retval); \
} while (0)
#else
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
/** @internal bitmask with valid and valid_group fields set */
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS          0x01000000

/** @internal Default RCU defer queue entries to reclaim in one go. */
#define RTE_LPM_RCU_DQ_RECLAIM_MAX      16
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24/tbl8 entry structure. */
struct rte_lpm_tbl_entry {
	/** Next hop, or a tbl8 group index when valid_group is set (tbl24). */
	uint32_t next_hop    :24;
	uint32_t valid       :1; /**< Validation flag. */
	/** tbl24: entry points to a tbl8 group; tbl8: group is in use. */
	uint32_t valid_group :1;
	uint32_t depth       :6; /**< Rule depth. */
};
#else
/* Big-endian layout: the same fields in reverse order. */
struct rte_lpm_tbl_entry {
	uint32_t depth       :6;
	uint32_t valid_group :1;
	uint32_t valid       :1;
	uint32_t next_hop    :24;
};
#endif
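/*
 * Viewed as a raw uint32_t on a little-endian target, an entry is laid out
 * as bits 0-23 next_hop, bit 24 valid, bit 25 valid_group, bits 26-31 depth.
 * RTE_LPM_LOOKUP_SUCCESS (0x01000000) therefore tests the valid bit, and
 * RTE_LPM_VALID_EXT_ENTRY_BITMASK (0x03000000) matches entries with both
 * valid and valid_group set, i.e. tbl24 entries that extend into a tbl8.
 */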
/** @internal LPM structure. */
struct rte_lpm {
	/* LPM Tables. */
	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
			__rte_cache_aligned; /**< LPM tbl24 table. */
	struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
};

/** LPM RCU QSBR configuration structure. */
struct rte_lpm_rcu_config {
	struct rte_rcu_qsbr *v;      /**< RCU QSBR variable. */
	enum rte_lpm_qsbr_mode mode; /**< RTE_LPM_QSBR_MODE_xxx;
				      *   0 (default): defer queue reclaim. */
	uint32_t dq_size;            /**< RCU defer queue size;
				      *   default: number of tbl8s. */
	uint32_t reclaim_thd;        /**< Threshold to trigger auto reclaim. */
	uint32_t reclaim_max;        /**< Max entries to reclaim in one go;
				      *   default: RTE_LPM_RCU_DQ_RECLAIM_MAX. */
};
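/*
 * Sketch of attaching RCU reclamation to a table (assumes "qsbr" is an
 * rte_rcu_qsbr variable already allocated and initialized for the reader
 * threads; fields left unset take the defaults noted above):
 *
 *	struct rte_lpm_rcu_config rcu_cfg = { .v = qsbr };
 *	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0)
 *		rte_panic("failed to enable RCU on LPM\n");
 */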
/**
 * Add a rule to the LPM table.
 *
 * @return 0 on success, negative value otherwise
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
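/*
 * Example: install 192.168.0.0/16 with next hop id 5. The address is given
 * in host byte order; RTE_IPV4() from rte_ip.h builds such a value.
 *
 *	int ret = rte_lpm_add(lpm, RTE_IPV4(192, 168, 0, 0), 16, 5);
 *	if (ret < 0)
 *		rte_panic("failed to add route\n");
 */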
/**
 * Lookup an IP in the LPM table.
 *
 * @return -EINVAL on invalid args, -ENOENT on lookup miss, 0 on hit
 */
static inline int
rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
	unsigned tbl24_index = (ip >> 8);
	uint32_t tbl_entry;
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);

	/* Copy tbl24 entry */
	ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
	tbl_entry = *ptbl;

	/* Copy tbl8 entry (only if needed) */
	if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

		unsigned tbl8_index = (uint8_t)ip +
				(((uint32_t)tbl_entry & 0x00FFFFFF) *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

		ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
		tbl_entry = *ptbl;
	}

	*next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
	return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
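/*
 * Example: resolve a destination; rte_lpm_lookup() returns 0 on hit and
 * -ENOENT on miss, writing the next hop id to the out parameter.
 *
 *	uint32_t nh;
 *	if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 1, 1), &nh) == 0)
 *		printf("next hop %u\n", nh);
 */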
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
static inline int
rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
		uint32_t *next_hops, const unsigned n)
{
	unsigned i;
	unsigned tbl24_indexes[n];
	const uint32_t *ptbl;

	/* DEBUG: Check user input arguments. */
	RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
			(next_hops == NULL)), -EINVAL);

	for (i = 0; i < n; i++) {
		tbl24_indexes[i] = ips[i] >> 8;
	}

	for (i = 0; i < n; i++) {
		/* Simply copy tbl24 entry to output */
		ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
		next_hops[i] = *ptbl;

		/* Overwrite output with tbl8 entry if needed */
		if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
				RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {

			unsigned tbl8_index = (uint8_t)ips[i] +
					(((uint32_t)next_hops[i] & 0x00FFFFFF) *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES);

			ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
			next_hops[i] = *ptbl;
		}
	}
	return 0;
}
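/*
 * Example: batch-resolve a burst. Unlike rte_lpm_lookup(), the bulk variant
 * writes raw tbl entries to next_hops[]; callers test RTE_LPM_LOOKUP_SUCCESS
 * and mask off the low 24 bits to recover each next hop id.
 *
 *	uint32_t dst[2] = { RTE_IPV4(10, 0, 0, 1), RTE_IPV4(10, 0, 1, 1) };
 *	uint32_t nh[2];
 *	unsigned j;
 *	rte_lpm_lookup_bulk(lpm, dst, nh, 2);
 *	for (j = 0; j < 2; j++)
 *		if (nh[j] & RTE_LPM_LOOKUP_SUCCESS)
 *			printf("next hop %u\n", nh[j] & 0x00FFFFFF);
 */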
/* Mask four results. */
#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ffffff00ffffff)
#if defined(RTE_ARCH_ARM)
#ifdef RTE_HAS_SVE_ACLE
#include "rte_lpm_sve.h"
#undef rte_lpm_lookup_bulk
#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
		__rte_lpm_lookup_vec(lpm, ips, next_hops, n)
#endif
#include "rte_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "rte_lpm_altivec.h"
#elif defined(RTE_ARCH_X86)
#include "rte_lpm_sse.h"
#else
#include "rte_lpm_scalar.h"
#endif
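/*
 * Each per-arch header above supplies rte_lpm_lookupx4(), which resolves four
 * packed IPv4 addresses at once. Illustrative x86 sketch (other arches fill
 * xmm_t with their own intrinsics); defv, here UINT32_MAX, is the hop value
 * written for addresses that miss:
 *
 *	xmm_t ip4 = _mm_set_epi32(ip3, ip2, ip1, ip0);
 *	uint32_t hop[4];
 *	rte_lpm_lookupx4(lpm, ip4, hop, UINT32_MAX);
 */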
/* Public API summary (prototypes as declared in this header). */
struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config);
struct rte_lpm *rte_lpm_find_existing(const char *name);
int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg);
int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop);
int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop);
int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
void rte_lpm_delete_all(struct rte_lpm *lpm);
void rte_lpm_free(struct rte_lpm *lpm);
static int rte_lpm_lookup(const struct rte_lpm *lpm, uint32_t ip,
		uint32_t *next_hop);
static void rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip,
		uint32_t hop[4], uint32_t defv);