#include <rte_config.h>

#define PKT_RX_VLAN          (1ULL << 0)
#define PKT_RX_RSS_HASH      (1ULL << 1)
#define PKT_RX_FDIR          (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

#define PKT_RX_IP_CKSUM_MASK    ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_L4_CKSUM_MASK    ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_IEEE1588_PTP       (1ULL << 9)
#define PKT_RX_IEEE1588_TMST      (1ULL << 10)
#define PKT_RX_FDIR_ID            (1ULL << 13)
#define PKT_RX_FDIR_FLX           (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED      (1ULL << 15)
#define PKT_RX_LRO                (1ULL << 16)
#define PKT_RX_TIMESTAMP          (1ULL << 17)
#define PKT_RX_SEC_OFFLOAD        (1ULL << 18)
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
#define PKT_RX_QINQ               (1ULL << 20)

#define PKT_TX_SEC_OFFLOAD      (1ULL << 43)
#define PKT_TX_MACSEC           (1ULL << 44)
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45)
#define PKT_TX_QINQ_PKT         (1ULL << 49)
#define PKT_TX_TCP_SEG          (1ULL << 50)
#define PKT_TX_IEEE1588_TMST    (1ULL << 51)

#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM   (1ULL << 52)
#define PKT_TX_SCTP_CKSUM  (2ULL << 52)
#define PKT_TX_UDP_CKSUM   (3ULL << 52)
#define PKT_TX_L4_MASK     (3ULL << 52)

#define PKT_TX_IP_CKSUM       (1ULL << 54)
#define PKT_TX_IPV4           (1ULL << 55)
#define PKT_TX_IPV6           (1ULL << 56)
#define PKT_TX_VLAN_PKT       (1ULL << 57)
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
#define PKT_TX_OUTER_IPV4     (1ULL << 59)
#define PKT_TX_OUTER_IPV6     (1ULL << 60)

/* excerpt; intermediate members of the mask are elided in this listing */
#define PKT_TX_OFFLOAD_MASK (    \
		PKT_TX_OUTER_IPV6 |      \
		PKT_TX_OUTER_IPV4 |      \
		PKT_TX_OUTER_IP_CKSUM |  \
		/* ... */                \
		PKT_TX_IEEE1588_TMST |   \
		/* ... */                \
		PKT_TX_TUNNEL_MASK |     \
		/* ... */)

#define __RESERVED        (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62)
#define CTRL_MBUF_FLAG    (1ULL << 63)

#define RTE_MBUF_PRIV_ALIGN 8

#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* struct rte_mbuf (excerpt) */
	MARKER rx_descriptor_fields1;
	MARKER cacheline1 __rte_cache_min_aligned;
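The IP and L4 checksum status of a received packet is a 2-bit code inside ol_flags, so it should be read through PKT_RX_IP_CKSUM_MASK / PKT_RX_L4_CKSUM_MASK rather than tested bit by bit. A minimal usage sketch (not part of the header); handle_rx_ip_cksum() is a hypothetical name.

/* Decode the 2-bit IP checksum status of a received mbuf. */
#include <rte_mbuf.h>

static inline int
handle_rx_ip_cksum(const struct rte_mbuf *m)
{
	switch (m->ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_GOOD:
		return 0;               /* checksum verified by hardware */
	case PKT_RX_IP_CKSUM_UNKNOWN:
	case PKT_RX_IP_CKSUM_NONE:
		return 1;               /* verify (or recompute) in software */
	case PKT_RX_IP_CKSUM_BAD:
	default:
		return -1;              /* drop the packet */
	}
}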
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

#if RTE_CACHE_LINE_SIZE == 64
/* ... */

/* rte_mbuf_data_iova() (excerpt) */
	return mb->buf_iova + mb->data_off;

/* deprecated alias of rte_mbuf_data_iova() */
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)

/* rte_mbuf_data_iova_default() (excerpt) */
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;

/* deprecated alias of rte_mbuf_data_iova_default() */
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
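rte_mbuf_data_iova() is what drivers typically use to program a DMA descriptor with the bus address of the packet data. A minimal sketch under that assumption; struct my_desc and fill_desc() are hypothetical.

#include <rte_mbuf.h>

struct my_desc {
	uint64_t addr;
	uint16_t len;
};

static inline void
fill_desc(struct my_desc *d, const struct rte_mbuf *m)
{
	d->addr = rte_mbuf_data_iova(m);    /* buf_iova + data_off */
	d->len  = rte_pktmbuf_data_len(m);  /* length of this segment */
}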
#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
#define RTE_MBUF_DIRECT(mb)   (!RTE_MBUF_INDIRECT(mb))

#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#else
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
#endif

/* reference-count accessors, atomic variant (excerpts) */
#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
/* ... */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
/* ... */

static inline uint16_t
/* ... */
		return (uint16_t)value;
	/* ... */
	return __rte_mbuf_refcnt_update(m, value);

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
/* ... */

static inline uint16_t
/* ... */
	return __rte_mbuf_refcnt_update(m, value);

static inline uint16_t
/* ... */
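Applications normally use rte_mbuf_refcnt_read() and rte_mbuf_refcnt_update() (see the index below) rather than the internal helper. A minimal sketch for a single-segment mbuf; share_once() is a hypothetical name.

#include <rte_mbuf.h>

/* Bump the refcnt so the same single-segment mbuf can be handed to two
 * consumers; each calls rte_pktmbuf_free(), and only the second free
 * actually returns the mbuf to its pool. */
static inline struct rte_mbuf *
share_once(struct rte_mbuf *m)
{
	rte_mbuf_refcnt_update(m, 1);   /* refcnt: 1 -> 2 */
	return m;
}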
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
	/* ... */

#define MBUF_RAW_ALLOC_CHECK(m) do {                \
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);   \
	RTE_ASSERT((m)->next == NULL);              \
	RTE_ASSERT((m)->nb_segs == 1);              \
	__rte_mbuf_sanity_check(m, 0);              \
} while (0)

/* rte_mbuf_raw_alloc() (excerpt) */
	MBUF_RAW_ALLOC_CHECK(m);

/* rte_mbuf_raw_free() (excerpt) */
	RTE_ASSERT(m->next == NULL);

/* deprecated variant */
__rte_mbuf_raw_free(struct rte_mbuf *m)

/* ... */
		void *m, unsigned i);
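rte_mbuf_raw_alloc() and rte_mbuf_raw_free() are the driver-level entry points: the mbuf must satisfy exactly the conditions MBUF_RAW_ALLOC_CHECK() asserts (refcnt of 1, no chained segments). A minimal sketch; grab_descriptor_mbuf() is a hypothetical name.

#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mbuf *
grab_descriptor_mbuf(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m == NULL)
		return NULL;    /* pool exhausted */
	/* caller is responsible for data_off, lengths, ol_flags, ... */
	return m;
}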
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
#define rte_ctrlmbuf_free(m)   rte_pktmbuf_free(m)
#define rte_ctrlmbuf_data(m)   ((char *)((m)->buf_addr) + (m)->data_off)
#define rte_ctrlmbuf_len(m)    rte_pktmbuf_data_len(m)

/* ... */
		void *m, unsigned i);
static inline uint16_t
/* ... */

static inline uint16_t
/* ... */

/* rte_pktmbuf_reset_headroom() (excerpt) */
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
			/* ... */);

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
/* ... */

/* rte_pktmbuf_alloc() (excerpt) */
		rte_pktmbuf_reset(m);
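rte_pktmbuf_alloc() combines the raw allocation with rte_pktmbuf_reset(), so the returned mbuf already has its headroom set. A minimal sketch of filling such an mbuf with payload; build_frame() and its arguments are hypothetical.

#include <string.h>
#include <rte_mbuf.h>

static struct rte_mbuf *
build_frame(struct rte_mempool *mp, const void *payload, uint16_t len)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);   /* reset, headroom set */

	if (m == NULL)
		return NULL;
	if (rte_pktmbuf_append(m, len) == NULL) {     /* not enough tailroom */
		rte_pktmbuf_free(m);
		return NULL;
	}
	memcpy(rte_pktmbuf_mtod(m, void *), payload, len);
	return m;
}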
/* rte_pktmbuf_alloc_bulk() (excerpt): the check/reset loop is unrolled
 * four ways (Duff's device) over count % 4; the case labels and index
 * increments between the pairs are elided in this listing */
		struct rte_mbuf **mbufs, unsigned count)
	/* ... */
	switch (count % 4) {
	/* ... */
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			/* ... */
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			/* ... */
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			/* ... */
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
		}
	}
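A minimal sketch of the bulk variant: rte_pktmbuf_alloc_bulk() returns 0 only when the whole burst could be allocated, and each element comes back reset exactly as from rte_pktmbuf_alloc(). BURST and fill_tx_burst() are hypothetical.

#include <rte_mbuf.h>

#define BURST 32

static int
fill_tx_burst(struct rte_mempool *mp, struct rte_mbuf *pkts[BURST])
{
	if (rte_pktmbuf_alloc_bulk(mp, pkts, BURST) != 0)
		return -1;      /* pool could not supply BURST mbufs */
	/* pkts[0..BURST-1] are ready to be filled with data */
	return 0;
}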
/* rte_pktmbuf_attach() (excerpt): the indirect mbuf shares the donor's buffer */
	mi->buf_iova = m->buf_iova;
	/* ... */
	mi->data_off = m->data_off;

/* rte_pktmbuf_detach() (excerpt): restore the mbuf's own embedded buffer */
	m->buf_addr = (char *)m + mbuf_size;
	/* ... */
	m->buf_len = (uint16_t)buf_len;
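rte_pktmbuf_clone() builds on attach(): the clone is an indirect mbuf that shares the original's data buffer and bumps its reference count. A minimal sketch; mirror_packet() is a hypothetical name, and the dedicated indirect_pool is an assumption (clones are often drawn from a pool with little or no data room).

#include <rte_mbuf.h>

static struct rte_mbuf *
mirror_packet(struct rte_mbuf *pkt, struct rte_mempool *indirect_pool)
{
	struct rte_mbuf *clone = rte_pktmbuf_clone(pkt, indirect_pool);

	if (clone == NULL)
		return NULL;    /* indirect_pool exhausted */
	/* clone is RTE_MBUF_INDIRECT() and shares pkt's data buffer;
	 * pkt's refcnt was incremented, so both must still be freed. */
	return clone;
}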
/* rte_pktmbuf_prefree_seg() (excerpt) */
		if (m->next != NULL) {
			/* ... */
	/* ... */
	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
		/* ... */
		if (m->next != NULL) {
			/* ... */

/* deprecated variant */
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
/* rte_pktmbuf_clone() (excerpt): attach one indirect mbuf per source segment */
	} while ((md = md->next) != NULL &&
		/* ... */

/* rte_pktmbuf_refcnt_update() (excerpt): every segment is updated */
	} while ((m = m->next) != NULL);

/* rte_pktmbuf_lastseg() (excerpt) */
	while (m2->next != NULL)
		/* ... */
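Multi-segment packets are linked through the next pointer, and rte_pktmbuf_free() releases the whole chain. A minimal sketch; count_bytes() is a hypothetical name, and for a well-formed packet the sum should match rte_pktmbuf_pkt_len().

#include <rte_mbuf.h>

static uint32_t
count_bytes(struct rte_mbuf *pkt)
{
	uint32_t total = 0;
	struct rte_mbuf *seg;

	for (seg = pkt; seg != NULL; seg = seg->next)
		total += rte_pktmbuf_data_len(seg);

	rte_pktmbuf_free(pkt);  /* frees every segment in the chain */
	return total;
}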
#define rte_pktmbuf_mtod_offset(m, t, o) \
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated alias */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated alias */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

#define rte_pktmbuf_pkt_len(m)  ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/* rte_pktmbuf_prepend() (excerpt) */
	m->data_off = (uint16_t)(m->data_off - len);
	/* ... */
	return (char *)m->buf_addr + m->data_off;

/* rte_pktmbuf_append() (excerpt) */
	return (char *)tail;

/* rte_pktmbuf_adj() (excerpt) */
	m->data_off = (uint16_t)(m->data_off + len);
	/* ... */
	return (char *)m->buf_addr + m->data_off;
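rte_pktmbuf_mtod() and rte_pktmbuf_adj() are the usual way to walk past headers: adj() advances data_off and shrinks both data_len and pkt_len. A minimal sketch assuming the pre-19.08 struct ether_hdr name from <rte_ether.h> and a first segment that holds the whole Ethernet header; strip_eth() is a hypothetical name.

#include <rte_mbuf.h>
#include <rte_ether.h>

static void *
strip_eth(struct rte_mbuf *m)
{
	if (rte_pktmbuf_data_len(m) < sizeof(struct ether_hdr))
		return NULL;    /* header not contained in first segment */
	/* advance data_off past the Ethernet header; returns the new start
	 * of data, i.e. the next protocol header */
	return rte_pktmbuf_adj(m, sizeof(struct ether_hdr));
}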
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

/* rte_pktmbuf_read() (excerpt): fast path within one segment, otherwise the
 * segment-crossing helper copies into the caller's buffer */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	/* ... */
	return __rte_pktmbuf_read(m, off, len, buf);
}
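rte_pktmbuf_read() returns a pointer into the mbuf when the requested range is contiguous and otherwise copies it into the caller-supplied buffer, returning NULL if the range runs past pkt_len. A minimal sketch; peek_u32() is a hypothetical name.

#include <string.h>
#include <rte_mbuf.h>

static int
peek_u32(const struct rte_mbuf *m, uint32_t off, uint32_t *out)
{
	uint32_t scratch;
	const void *p = rte_pktmbuf_read(m, off, sizeof(scratch), &scratch);

	if (p == NULL)
		return -1;              /* off + len exceeds pkt_len */
	memcpy(out, p, sizeof(*out));   /* p may or may not be &scratch */
	return 0;
}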
/* rte_pktmbuf_chain() (excerpt): link the tail chain onto the last segment */
	cur_tail->next = tail;
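A minimal sketch of chaining: rte_pktmbuf_chain() appends tail to head, updating head's pkt_len and nb_segs, and fails (non-zero) if the segment count would overflow. join_packets() is a hypothetical name.

#include <rte_mbuf.h>

static struct rte_mbuf *
join_packets(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	if (rte_pktmbuf_chain(head, tail) != 0)
		return NULL;    /* would exceed RTE_MBUF_MAX_NB_SEGS */
	/* head->pkt_len and head->nb_segs now cover both chains */
	return head;
}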
/* rte_validate_tx_offload() (excerpt): consistency checks on ol_flags */
	uint64_t inner_l3_offset = m->l2_len;
	/* ... */
				!(ol_flags & PKT_TX_IP_CKSUM)))
		/* ... */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
		/* ... */

/* rte_pktmbuf_linearize() (excerpt) */
	size_t seg_len, copy_len;
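Transmit offloads are requested by setting PKT_TX_* flags plus the l2_len/l3_len fields before the packet reaches the PMD, and rte_validate_tx_offload() can double-check the combination. A minimal sketch for an untagged IPv4/TCP frame; request_tx_cksum() and the fixed header lengths are assumptions.

#include <rte_mbuf.h>

static int
request_tx_cksum(struct rte_mbuf *m)
{
	m->l2_len = 14;         /* Ethernet */
	m->l3_len = 20;         /* IPv4 without options */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	/* for L4 offload the TCP checksum field in the packet is typically
	 * pre-filled with the pseudo-header checksum */

	return rte_validate_tx_offload(m);      /* 0 if consistent */
}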
Member index (functions, macros, typedefs and data fields declared in rte_mbuf.h):

static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
uint16_t mbuf_data_room_size
#define __rte_always_inline
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
uint8_t inner_esp_next_proto
__extension__ typedef void * MARKER[0]
#define RTE_MBUF_DIRECT(mb)
#define IND_ATTACHED_MBUF
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
static int rte_validate_tx_offload(const struct rte_mbuf *m)
static void rte_pktmbuf_free(struct rte_mbuf *m)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
__extension__ typedef uint8_t MARKER8[0]
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
struct rte_mbuf __rte_cache_aligned
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
#define PKT_TX_OUTER_IP_CKSUM
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
struct rte_mbuf::@117::@129 sched
__extension__ typedef uint64_t MARKER64[0]
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
struct rte_mbuf::@117::@128 fdir
#define __rte_mbuf_sanity_check(m, is_h)
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
#define rte_pktmbuf_pkt_len(m)
RTE_STD_C11 union rte_mbuf::@114 __rte_aligned
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
static void rte_pktmbuf_detach(struct rte_mbuf *m)
#define rte_pktmbuf_data_len(m)
#define rte_pktmbuf_mtod(m, t)
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
#define MBUF_INVALID_PORT
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
const char * rte_get_rx_ol_flag_name(uint64_t mask)
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
#define RTE_PTR_SUB(ptr, x)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
static rte_iova_t rte_mempool_virt2iova(const void *elt)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
rte_atomic16_t refcnt_atomic
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
#define RTE_MBUF_INDIRECT(mb)
#define rte_pktmbuf_mtod_offset(m, t, o)
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
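Putting the pieces together: a pool created with the defaults above supplies the mbufs used throughout this header. A minimal sketch, assuming rte_eal_init() has already run; the pool name and sizes are illustrative.

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_lcore.h>

static struct rte_mempool *
make_pktmbuf_pool(void)
{
	return rte_pktmbuf_pool_create("mbuf_pool",
			8192,                       /* number of mbufs */
			256,                        /* per-lcore cache size */
			0,                          /* application private area */
			RTE_MBUF_DEFAULT_BUF_SIZE,  /* data room + headroom */
			rte_socket_id());
}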