#include <rte_compat.h>
#include <rte_config.h>

/* RX offload flags (ol_flags) */
#define PKT_RX_VLAN          (1ULL << 0)
#define PKT_RX_RSS_HASH      (1ULL << 1)
#define PKT_RX_FDIR          (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)  /* deprecated: check PKT_RX_L4_CKSUM_MASK instead */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)  /* deprecated: check PKT_RX_IP_CKSUM_MASK instead */
#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/* 2-bit field describing the RX IP checksum status */
#define PKT_RX_IP_CKSUM_MASK    ((1ULL << 4) | (1ULL << 7))
#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

/* 2-bit field describing the RX L4 checksum status */
#define PKT_RX_L4_CKSUM_MASK    ((1ULL << 3) | (1ULL << 8))
#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_IEEE1588_PTP       (1ULL << 9)
#define PKT_RX_IEEE1588_TMST      (1ULL << 10)
#define PKT_RX_FDIR_ID            (1ULL << 13)
#define PKT_RX_FDIR_FLX           (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED      (1ULL << 15)
#define PKT_RX_LRO                (1ULL << 16)
#define PKT_RX_TIMESTAMP          (1ULL << 17)
#define PKT_RX_SEC_OFFLOAD        (1ULL << 18)
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)
#define PKT_RX_QINQ               (1ULL << 20)

/* TX offload flags (ol_flags) */
#define PKT_TX_UDP_SEG       (1ULL << 42)
#define PKT_TX_SEC_OFFLOAD   (1ULL << 43)
#define PKT_TX_MACSEC        (1ULL << 44)

/* Tunnel type for TX tunneling offloads (4-bit field) */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45)

#define PKT_TX_QINQ          (1ULL << 49)
#define PKT_TX_QINQ_PKT      PKT_TX_QINQ	/* deprecated name */
#define PKT_TX_TCP_SEG       (1ULL << 50)
#define PKT_TX_IEEE1588_TMST (1ULL << 51)

/* Requested L4 checksum offload (2-bit field) */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52)
#define PKT_TX_TCP_CKSUM     (1ULL << 52)
#define PKT_TX_SCTP_CKSUM    (2ULL << 52)
#define PKT_TX_UDP_CKSUM     (3ULL << 52)
#define PKT_TX_L4_MASK       (3ULL << 52)

#define PKT_TX_IP_CKSUM      (1ULL << 54)
#define PKT_TX_IPV4          (1ULL << 55)
#define PKT_TX_IPV6          (1ULL << 56)
#define PKT_TX_VLAN          (1ULL << 57)
#define PKT_TX_VLAN_PKT      PKT_TX_VLAN	/* deprecated name */
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
#define PKT_TX_OUTER_IPV4    (1ULL << 59)
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/* Bitmask of all supported packet TX offload flags */
#define PKT_TX_OFFLOAD_MASK (	 \
		PKT_TX_IP_CKSUM |        \
		PKT_TX_L4_MASK |         \
		PKT_TX_OUTER_IP_CKSUM |  \
		PKT_TX_TCP_SEG |         \
		PKT_TX_IEEE1588_TMST |   \
		PKT_TX_QINQ |            \
		PKT_TX_VLAN |            \
		PKT_TX_TUNNEL_MASK |     \
		PKT_TX_MACSEC |          \
		PKT_TX_SEC_OFFLOAD)

#define __RESERVED        (1ULL << 61)	/* reserved for future use */
#define IND_ATTACHED_MBUF (1ULL << 62)	/* indirect attached mbuf */
#define CTRL_MBUF_FLAG    (1ULL << 63)	/* mbuf is a control mbuf */

#define RTE_MBUF_PRIV_ALIGN 8

#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
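/*
 * Illustrative sketch (not part of rte_mbuf.h): how an application might
 * consult the 2-bit RX checksum status fields and request TX checksum
 * offload. The helper names and the IPv4/TCP case are assumptions for the
 * example; ether_hdr and ipv4_hdr come from <rte_ether.h> and <rte_ip.h>.
 */
static inline int
example_check_rx_cksum(const struct rte_mbuf *m)
{
	/* compare the masked field against the named states, do not test
	 * the individual bits in isolation */
	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
		return -1;
	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		return -1;
	return 0;
}

static inline void
example_request_tx_cksum_ipv4_tcp(struct rte_mbuf *m)
{
	/* ask the PMD to fill in the IPv4 and TCP checksums; l2_len and
	 * l3_len must describe the headers present in the mbuf data */
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}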
/* struct rte_mbuf (fragment): markers delimiting the RX descriptor fields
 * and the start of the second cache line */
	MARKER rx_descriptor_fields1;
	/* ... */
	MARKER cacheline1 __rte_cache_min_aligned;
#define RTE_MBUF_MAX_NB_SEGS	UINT16_MAX

/* in rte_mbuf_prefetch_part2(m): cacheline1 is a distinct cache line only
 * when the cache line size is 64 bytes */
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#endif

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

/* deprecated alias of rte_mbuf_data_iova() */
__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb);

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

/* deprecated alias of rte_mbuf_data_iova_default() */
__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb);
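/*
 * Illustrative sketch (not part of rte_mbuf.h): the address handed to a NIC
 * is the buffer IOVA plus the data offset; for a freshly reset mbuf the
 * "default" helper assumes RTE_PKTMBUF_HEADROOM. The descriptor type
 * example_rx_desc and its buf_addr field are hypothetical.
 */
struct example_rx_desc { uint64_t buf_addr; };

static inline void
example_refill_rx_desc(struct example_rx_desc *d, struct rte_mbuf *m)
{
	/* equivalent to m->buf_iova + RTE_PKTMBUF_HEADROOM here */
	d->buf_addr = rte_mbuf_data_iova_default(m);
}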
#define RTE_MBUF_INDIRECT(mb)	((mb)->ol_flags & IND_ATTACHED_MBUF)
#define RTE_MBUF_DIRECT(mb)	(!RTE_MBUF_INDIRECT(mb))

#ifdef RTE_LIBRTE_MBUF_DEBUG
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
#else
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
#endif

#ifdef RTE_MBUF_REFCNT_ATOMIC

/* Atomic reference-count accessors, built on rte_atomic16_*() and the
 * refcnt_atomic field (bodies elided in this extract). */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m);

static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value);

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/* single-owner fast path elided in this extract */
	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* Non-atomic variants operating directly on the refcnt field. */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value);

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m);

#endif /* RTE_MBUF_REFCNT_ATOMIC */

#define RTE_MBUF_PREFETCH_TO_FREE(m) do {	\
	if ((m) != NULL)			\
		rte_prefetch0(m);		\
} while (0)

#define MBUF_RAW_ALLOC_CHECK(m) do {			\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);	\
	RTE_ASSERT((m)->next == NULL);			\
	RTE_ASSERT((m)->nb_segs == 1);			\
	__rte_mbuf_sanity_check(m, 0);			\
} while (0)

/* in rte_mbuf_raw_alloc(mp): */
	MBUF_RAW_ALLOC_CHECK(m);
/* in rte_mbuf_raw_free(m): */
	RTE_ASSERT(m->next == NULL);

/* deprecated, replaced by rte_mbuf_raw_free() */
__rte_deprecated
static inline void
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	rte_mbuf_raw_free(m);
}

void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

#define rte_ctrlmbuf_alloc(mp)	rte_pktmbuf_alloc(mp)
#define rte_ctrlmbuf_free(m)	rte_pktmbuf_free(m)
#define rte_ctrlmbuf_data(m)	((char *)((m)->buf_addr) + (m)->data_off)
#define rte_ctrlmbuf_len(m)	rte_pktmbuf_data_len(m)

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);
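/*
 * Illustrative sketch (not part of rte_mbuf.h): the raw alloc/free pair is
 * intended for driver fast paths that fully initialize the mbuf themselves;
 * applications normally use rte_pktmbuf_alloc()/rte_pktmbuf_free(). The
 * refill/cleanup helper names here are assumptions for the example.
 */
static inline struct rte_mbuf *
example_rx_refill_one(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m != NULL)
		rte_pktmbuf_reset_headroom(m);	/* driver fills lengths on RX */
	return m;
}

static inline void
example_tx_cleanup_one(struct rte_mbuf *m)
{
	/* only valid for a direct, unreferenced, single-segment mbuf */
	rte_mbuf_raw_free(m);
}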
struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

struct rte_mempool * __rte_experimental
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

/* read pool parameters back from the mempool private area */
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp);

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
					(uint16_t)m->buf_len);
}
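/*
 * Illustrative sketch (not part of rte_mbuf.h): creating a typical packet
 * mbuf pool. The pool name, element count and cache size are arbitrary
 * example values.
 */
static inline struct rte_mempool *
example_make_pktmbuf_pool(int socket_id)
{
	return rte_pktmbuf_pool_create("example_mbuf_pool", 8192, 256,
			0 /* priv_size */, RTE_MBUF_DEFAULT_BUF_SIZE,
			socket_id);
}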
#define MBUF_INVALID_PORT UINT16_MAX

/* rte_pktmbuf_reset(m): re-initialize the packet fields of an already
 * allocated mbuf (body elided in this extract) */
static inline void rte_pktmbuf_reset(struct rte_mbuf *m);

/* in rte_pktmbuf_alloc(mp): */
	rte_pktmbuf_reset(m);
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* Duff's-device style loop unrolling: the case labels fall
	 * through into the while loop on purpose. */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
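/*
 * Illustrative sketch (not part of rte_mbuf.h): allocating a burst of mbufs
 * in one call. The allocation is all-or-nothing: on failure no mbufs are
 * taken from the pool.
 */
static inline uint16_t
example_alloc_burst(struct rte_mempool *pool, struct rte_mbuf *pkts[],
		uint16_t n)
{
	if (rte_pktmbuf_alloc_bulk(pool, pkts, n) != 0)
		return 0;
	return n;
}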
/* in rte_pktmbuf_attach(mi, m): the new indirect mbuf points at the same
 * data buffer and offset as the source */
	mi->buf_iova = m->buf_iova;
	/* ... */
	mi->data_off = m->data_off;

/* in rte_pktmbuf_detach(m): restore the mbuf's own, embedded data buffer */
	uint32_t mbuf_size, buf_len, priv_size;
	/* ... */
	m->buf_addr = (char *)m + mbuf_size;
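/*
 * Illustrative sketch (not part of rte_mbuf.h): zero-copy duplication with
 * rte_pktmbuf_clone(). The clone is built from indirect mbufs that reference
 * md's data and bump its segment refcounts; freeing the clone later detaches
 * the segments and drops those references. The pool argument is assumed to
 * be a dedicated "clone" pool with no data room requirement.
 */
static inline struct rte_mbuf *
example_duplicate(struct rte_mbuf *md, struct rte_mempool *clone_pool)
{
	struct rte_mbuf *mc = rte_pktmbuf_clone(md, clone_pool);

	if (mc != NULL)
		RTE_ASSERT(RTE_MBUF_INDIRECT(mc));
	return mc;
}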
/* in rte_pktmbuf_prefree_seg(m): both the refcnt == 1 fast path and the
 * decrement path unchain the segment before handing it back */
		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		/* ... */
	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
		/* ... */
		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	return rte_pktmbuf_prefree_seg(m);
}
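/*
 * Illustrative sketch (not part of rte_mbuf.h): the pattern driver TX
 * completion paths typically use. rte_pktmbuf_prefree_seg() returns the
 * segment if it can go straight back to its mempool, or NULL while other
 * references still hold it.
 */
static inline void
example_tx_free_completed(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (m != NULL)
		rte_mempool_put(m->pool, m);
}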
/* in rte_pktmbuf_clone(md, mp): attach one new indirect mbuf per source
 * segment */
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

/* in rte_pktmbuf_refcnt_update(m, v): apply the update to every segment */
	} while ((m = m->next) != NULL);

/* in rte_pktmbuf_lastseg(m): */
	while (m->next != NULL)
		m = m->next;
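/*
 * Illustrative sketch (not part of rte_mbuf.h): walking a segment chain.
 * On a well-formed packet the per-segment data_len values sum to the head
 * segment's pkt_len.
 */
static inline uint32_t
example_sum_seg_lengths(const struct rte_mbuf *m)
{
	uint32_t total = 0;

	do {
		total += m->data_len;
	} while ((m = m->next) != NULL);

	return total;
}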
#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated name */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated name */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
#define rte_pktmbuf_data_len(m) ((m)->data_len)

/* in rte_pktmbuf_prepend(m, len): */
	return (char *)m->buf_addr + m->data_off;

/* in rte_pktmbuf_append(m, len): */
	return (char *)tail;

/* in rte_pktmbuf_adj(m, len): */
	return (char *)m->buf_addr + m->data_off;
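/*
 * Illustrative sketch (not part of rte_mbuf.h): growing and trimming the
 * data area of a single-segment mbuf. The 16-byte sizes are arbitrary;
 * NULL return values signal insufficient head/tailroom. memset() assumes
 * <string.h>.
 */
static inline int
example_append_then_adjust(struct rte_mbuf *m)
{
	char *tail, *head;
	uint8_t *data;

	tail = rte_pktmbuf_append(m, 16);	/* extend at the end */
	if (tail == NULL)
		return -1;
	memset(tail, 0, 16);

	head = rte_pktmbuf_prepend(m, 16);	/* extend before data_off */
	if (head == NULL)
		return -1;

	if (rte_pktmbuf_adj(m, 16) == NULL)	/* drop those 16 bytes again */
		return -1;

	data = rte_pktmbuf_mtod(m, uint8_t *);	/* first byte of packet data */
	(void)data;
	return 0;
}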
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}
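/*
 * Illustrative sketch (not part of rte_mbuf.h): reading a header that may be
 * split across segments. If the requested bytes are contiguous in the first
 * segment no copy happens and the returned pointer is into the mbuf;
 * otherwise they are copied into hdr_buf and that buffer is returned.
 */
static inline const void *
example_read_l3_header(const struct rte_mbuf *m, uint32_t l2_len,
		uint32_t l3_len, void *hdr_buf)
{
	return rte_pktmbuf_read(m, l2_len, l3_len, hdr_buf);
}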
/* in rte_pktmbuf_chain(head, tail): link the tail chain after the last
 * segment of head */
	cur_tail->next = tail;

/* in rte_validate_tx_offload(m): */
	uint64_t inner_l3_offset = m->l2_len;

/* in rte_pktmbuf_linearize(mbuf): */
	int seg_len, copy_len;
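/*
 * Illustrative sketch (not part of rte_mbuf.h): appending a payload chain to
 * a header mbuf and sanity-checking the TX offload fields before handing the
 * packet to a driver. rte_pktmbuf_chain() fails when the resulting chain
 * would exceed the segment limit; the helper name is an assumption.
 */
static inline int
example_finish_packet(struct rte_mbuf *hdr, struct rte_mbuf *payload)
{
	if (rte_pktmbuf_chain(hdr, payload) != 0)
		return -1;

	if (hdr->ol_flags & PKT_TX_OFFLOAD_MASK)
		return rte_validate_tx_offload(hdr);

	return 0;
}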
/* File member index (declarations as listed for rte_mbuf.h): */

static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
uint16_t mbuf_data_room_size
#define __rte_always_inline
static int16_t rte_atomic16_read(const rte_atomic16_t *v)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
RTE_STD_C11 union rte_mbuf::(anonymous) __rte_aligned
uint8_t inner_esp_next_proto
__extension__ typedef void * MARKER[0]
#define RTE_MBUF_DIRECT(mb)
#define IND_ATTACHED_MBUF
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
static int rte_validate_tx_offload(const struct rte_mbuf *m)
static void rte_pktmbuf_free(struct rte_mbuf *m)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
__extension__ typedef uint8_t MARKER8[0]
struct rte_mbuf::hash.fdir (anonymous struct inside the hash union)
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
struct rte_mbuf __rte_cache_aligned
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
#define PKT_TX_OUTER_IP_CKSUM
struct rte_mempool *__rte_experimental rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
struct rte_mbuf::hash.sched (anonymous struct inside the hash union)
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
#define PKT_TX_OUTER_IPV4
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
__extension__ typedef uint64_t MARKER64[0]
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
#define __rte_mbuf_sanity_check(m, is_h)
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
static void rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
#define rte_pktmbuf_pkt_len(m)
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
static int16_t rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
static int rte_is_ctrlmbuf(struct rte_mbuf *m)
static void rte_pktmbuf_detach(struct rte_mbuf *m)
#define rte_pktmbuf_data_len(m)
#define rte_pktmbuf_mtod(m, t)
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
#define MBUF_INVALID_PORT
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
const char * rte_get_rx_ol_flag_name(uint64_t mask)
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
struct rte_mempool * pool
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
#define RTE_PTR_SUB(ptr, x)
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
static void * rte_memcpy(void *dst, const void *src, size_t n)
#define PKT_TX_OFFLOAD_MASK
static rte_iova_t rte_mempool_virt2iova(const void *elt)
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
rte_atomic16_t refcnt_atomic
static void * rte_mempool_get_priv(struct rte_mempool *mp)
char name[RTE_MEMZONE_NAMESIZE]
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
#define RTE_MBUF_INDIRECT(mb)
#define rte_pktmbuf_mtod_offset(m, t, o)
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_prefetch0(const volatile void *p)
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
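/*
 * Illustrative sketch (not part of rte_mbuf.h): using the debugging helpers
 * listed above to dump an mbuf and decode its RX offload flags. The 64-byte
 * dump length and the stack buffer size are arbitrary example values;
 * fprintf() assumes <stdio.h>.
 */
static inline void
example_debug_dump(FILE *f, const struct rte_mbuf *m)
{
	char flags[256];

	rte_pktmbuf_dump(f, m, 64);

	flags[0] = '\0';
	rte_get_rx_ol_flag_list(m->ol_flags, flags, sizeof(flags));
	fprintf(f, "rx flags: %s\n", flags);
}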