DPDK 18.02.0
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_compat.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Packet Offload Features Flags. They also carry packet type information.
 * These bits are critical resources shared by both the RX and TX paths;
 * be cautious about any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63 - 3), and new
 *   flags get added to the right of the previously defined flags, i.e.
 *   they should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

#define PKT_RX_VLAN (1ULL << 0)

#define PKT_RX_RSS_HASH (1ULL << 1)
#define PKT_RX_FDIR (1ULL << 2)
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)

#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)

#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))

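/*
 * Illustrative sketch (not part of the original header): each checksum
 * status above is a 2-bit encoding, so it must be compared against the
 * *_MASK values rather than tested bit-by-bit. A hypothetical helper an
 * application might define:
 *
 *   static inline int pkt_cksum_ok(const struct rte_mbuf *m)
 *   {
 *       uint64_t f = m->ol_flags;
 *
 *       return (f & PKT_RX_IP_CKSUM_MASK) != PKT_RX_IP_CKSUM_BAD &&
 *              (f & PKT_RX_L4_CKSUM_MASK) != PKT_RX_L4_CKSUM_BAD;
 *   }
 */
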
#define PKT_RX_IEEE1588_PTP (1ULL << 9)
#define PKT_RX_IEEE1588_TMST (1ULL << 10)
#define PKT_RX_FDIR_ID (1ULL << 13)
#define PKT_RX_FDIR_FLX (1ULL << 14)
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

#define PKT_RX_LRO (1ULL << 16)

#define PKT_RX_TIMESTAMP (1ULL << 17)

#define PKT_RX_SEC_OFFLOAD (1ULL << 18)

#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

#define PKT_RX_QINQ (1ULL << 20)

/* add new RX flags here */

/* add new TX flags here */

#define PKT_TX_UDP_SEG (1ULL << 42)

#define PKT_TX_SEC_OFFLOAD (1ULL << 43)

#define PKT_TX_MACSEC (1ULL << 44)

#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)

#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)

#define PKT_TX_QINQ (1ULL << 49)
/* this old name is deprecated */
#define PKT_TX_QINQ_PKT PKT_TX_QINQ

#define PKT_TX_TCP_SEG (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51)
#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
#define PKT_TX_TCP_CKSUM (1ULL << 52)
#define PKT_TX_SCTP_CKSUM (2ULL << 52)
#define PKT_TX_UDP_CKSUM (3ULL << 52)
#define PKT_TX_L4_MASK (3ULL << 52)
#define PKT_TX_IP_CKSUM (1ULL << 54)

#define PKT_TX_IPV4 (1ULL << 55)

#define PKT_TX_IPV6 (1ULL << 56)

#define PKT_TX_VLAN (1ULL << 57)
/* this old name is deprecated */
#define PKT_TX_VLAN_PKT PKT_TX_VLAN

#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)

#define PKT_TX_OUTER_IPV4 (1ULL << 59)

#define PKT_TX_OUTER_IPV6 (1ULL << 60)

#define PKT_TX_OFFLOAD_MASK ( \
		PKT_TX_IP_CKSUM | \
		PKT_TX_L4_MASK | \
		PKT_TX_OUTER_IP_CKSUM | \
		PKT_TX_TCP_SEG | \
		PKT_TX_IEEE1588_TMST | \
		PKT_TX_QINQ_PKT | \
		PKT_TX_VLAN_PKT | \
		PKT_TX_TUNNEL_MASK | \
		PKT_TX_MACSEC | \
		PKT_TX_SEC_OFFLOAD)

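/*
 * Illustrative sketch (not part of the original header): requesting IPv4
 * and TCP checksum offload on transmit. The flags only tell the PMD what
 * to do; the mbuf's l2_len/l3_len fields (defined further down) must be
 * set consistently as well. struct ether_hdr and struct ipv4_hdr come
 * from rte_ether.h and rte_ip.h, which this header does not include.
 *
 *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *   m->l2_len = sizeof(struct ether_hdr);
 *   m->l3_len = sizeof(struct ipv4_hdr);
 */
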
#define __RESERVED (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62)
/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG (1ULL << 63)
#define RTE_MBUF_PRIV_ALIGN 8

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
	(RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* define a set of marker types that can be used to refer to set points in the
 * mbuf */
__extension__
typedef void *MARKER[0];
__extension__
typedef uint8_t MARKER8[0];
__extension__
typedef uint64_t MARKER64[0];
struct rte_mbuf {
	MARKER cacheline0;

	void *buf_addr;
	RTE_STD_C11
	union {
		rte_iova_t buf_iova;
		rte_iova_t buf_physaddr; /**< deprecated */
	} __rte_aligned(sizeof(rte_iova_t));

	/* next 8 bytes are initialised on RX descriptor rearm */
	MARKER64 rearm_data;
	uint16_t data_off;

	RTE_STD_C11
	union {
		rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
		uint16_t refcnt;
	};
	uint16_t nb_segs;
	uint16_t port;

	uint64_t ol_flags;

	/* remaining bytes are set on RX when pulling packet from descriptor */
	MARKER rx_descriptor_fields1;

	/*
	 * The packet type, which is the combination of outer/inner L2, L3, L4
	 * and tunnel types. The packet_type is about data really present in the
	 * mbuf. Example: if vlan stripping is enabled, a received vlan packet
	 * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
	 * vlan is stripped from the data.
	 */
	RTE_STD_C11
	union {
		uint32_t packet_type;
		struct {
			uint32_t l2_type:4;
			uint32_t l3_type:4;
			uint32_t l4_type:4;
			uint32_t tun_type:4;
			RTE_STD_C11
			union {
				uint8_t inner_esp_next_proto;
				__extension__
				struct {
					uint8_t inner_l2_type:4;
					uint8_t inner_l3_type:4;
				};
			};
			uint32_t inner_l4_type:4;
		};
	};

	uint32_t pkt_len;
	uint16_t data_len;
	uint16_t vlan_tci;

	union {
		uint32_t rss;
		struct {
			RTE_STD_C11
			union {
				struct {
					uint16_t hash;
					uint16_t id;
				};
				uint32_t lo;
			};
			uint32_t hi;
		} fdir;
		struct {
			uint32_t lo;
			uint32_t hi;
		} sched;
		uint32_t usr;
	} hash;

	uint16_t vlan_tci_outer;

	uint16_t buf_len;
	uint64_t timestamp;

	/* second cache line - fields only used in slow path or on TX */
	MARKER cacheline1 __rte_cache_min_aligned;

	RTE_STD_C11
	union {
		void *userdata;
		uint64_t udata64;
	};

	struct rte_mempool *pool;
	struct rte_mbuf *next;

	/* fields to support TX offloads */
	RTE_STD_C11
	union {
		uint64_t tx_offload;
		__extension__
		struct {
			uint64_t l2_len:7;
			uint64_t l3_len:9;
			uint64_t l4_len:8;
			uint64_t tso_segsz:16;

			/* fields for TX offloading of tunnels */
			uint64_t outer_l3_len:9;
			uint64_t outer_l2_len:7;

			/* uint64_t unused:8; */
		};
	};

	uint16_t priv_size;

	uint16_t timesync;

	uint32_t seqn;

} __rte_cache_aligned;

#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(&m->cacheline0);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

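/*
 * Illustrative sketch (not part of the original header): in an RX burst
 * loop, the two-part prefetch lets an application pull in the first cache
 * line of an upcoming mbuf while the current one is being processed;
 * rte_mbuf_prefetch_part2() is only worth calling when the slow-path
 * fields in the second cache line will actually be touched.
 *
 *   for (i = 0; i < nb_rx; i++) {
 *       if (i + 1 < nb_rx)
 *           rte_mbuf_prefetch_part1(pkts[i + 1]);
 *       process_packet(pkts[i]);   // hypothetical application function
 *   }
 */
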

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova(mb);
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_data_iova_default(mb);
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	char *buffer_addr;
	buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
	return buffer_addr;
}

#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)

#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))

struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
	uint16_t mbuf_priv_size;      /**< Size of app private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic16_set(&m->refcnt_atomic, new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		rte_mbuf_refcnt_set(m, 1 + value);
		return 1 + value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

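/*
 * Illustrative sketch (not part of the original header): code that hands
 * the same mbuf to two independent consumers can take an extra reference
 * so the buffer survives the first free:
 *
 *   rte_mbuf_refcnt_update(m, 1);   // refcnt: 1 -> 2
 *   consume_and_free(m);            // hypothetical; each consumer frees once
 *   consume_and_free(m);            // actual release happens here
 */
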
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
	if ((m) != NULL)                        \
		rte_prefetch0(m);               \
} while (0)


void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

#define MBUF_RAW_ALLOC_CHECK(m) do {				\
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);		\
	RTE_ASSERT((m)->next == NULL);				\
	RTE_ASSERT((m)->nb_segs == 1);				\
	__rte_mbuf_sanity_check(m, 0);				\
} while (0)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	MBUF_RAW_ALLOC_CHECK(m);
	return m;
}

static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(m));
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	__rte_mbuf_sanity_check(m, 0);
	rte_mempool_put(m->pool, m);
}

/* compat with older versions */
__rte_deprecated
static inline void
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
	rte_mbuf_raw_free(m);
}

/* Operations on ctrl mbuf */

void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);

#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
	return !!(m->ol_flags & CTRL_MBUF_FLAG);
}

/* Operations on pkt mbuf */

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
		void *m, unsigned i);


void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

struct rte_mempool * __rte_experimental
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

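/*
 * Illustrative sketch (not part of the original header): creating a pool
 * of mbufs with the default buffer size at initialization time. The pool
 * name and sizes are arbitrary example values; rte_socket_id() and
 * rte_exit() come from rte_lcore.h and rte_debug.h.
 *
 *   struct rte_mempool *mp;
 *
 *   mp = rte_pktmbuf_pool_create("mbuf_pool", 8191, 256, 0,
 *           RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *   if (mp == NULL)
 *       rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 */
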
static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
}

#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = MBUF_INVALID_PORT;

	m->ol_flags = 0;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

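/*
 * Illustrative sketch (not part of the original header): allocating a
 * packet mbuf and claiming room for payload. "mp" is assumed to be a pool
 * created with rte_pktmbuf_pool_create().
 *
 *   struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *   char *payload;
 *
 *   if (m == NULL)
 *       return;                          // pool exhausted
 *   payload = rte_pktmbuf_append(m, 64); // claim 64 bytes of data room
 *   if (payload != NULL)
 *       memset(payload, 0, 64);          // fill in packet contents
 */
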
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	 struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand Duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while() loop is used rather than a do {} while() loop to
	 * avoid an extra check in case count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
	    rte_mbuf_refcnt_read(mi) == 1);

	/* if m is not direct, get the mbuf that embeds the data */
	if (RTE_MBUF_DIRECT(m))
		md = m;
	else
		md = rte_mbuf_from_indirect(m);

	rte_mbuf_refcnt_update(md, 1);
	mi->priv_size = m->priv_size;
	mi->buf_iova = m->buf_iova;
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	mi->port = m->port;
	mi->vlan_tci = m->vlan_tci;
	mi->vlan_tci_outer = m->vlan_tci_outer;
	mi->tx_offload = m->tx_offload;
	mi->hash = m->hash;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;
	mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
	mi->packet_type = m->packet_type;
	mi->timestamp = m->timestamp;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 1);
}

static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mbuf *md = rte_mbuf_from_indirect(m);
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len, priv_size;

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (RTE_MBUF_INDIRECT(m))
			rte_pktmbuf_detach(m);

		if (m->next != NULL) {
			m->next = NULL;
			m->nb_segs = 1;
		}
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	return rte_pktmbuf_prefree_seg(m);
}

static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
		struct rte_mempool *mp)
{
	struct rte_mbuf *mc, *mi, **prev;
	uint32_t pktlen;
	uint16_t nseg;

	if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
		return NULL;

	mi = mc;
	prev = &mi->next;
	pktlen = md->pkt_len;
	nseg = 0;

	do {
		nseg++;
		rte_pktmbuf_attach(mi, md);
		*prev = mi;
		prev = &mi->next;
	} while ((md = md->next) != NULL &&
	    (mi = rte_pktmbuf_alloc(mp)) != NULL);

	*prev = NULL;
	mc->nb_segs = nseg;
	mc->pkt_len = pktlen;

	/* Allocation of new indirect segment failed */
	if (unlikely (mi == NULL)) {
		rte_pktmbuf_free(mc);
		return NULL;
	}

	__rte_mbuf_sanity_check(mc, 1);
	return mc;
}

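/*
 * Illustrative sketch (not part of the original header): cloning creates
 * indirect mbufs that share the original data, useful for sending the same
 * payload out of several ports without copying. "clone_pool" is an assumed
 * pool reserved for the clone headers.
 *
 *   struct rte_mbuf *dup = rte_pktmbuf_clone(m, clone_pool);
 *
 *   if (dup != NULL) {
 *       send_on_port(1, dup);   // hypothetical TX path; frees dup
 *       send_on_port(0, m);     // original is freed independently
 *   }
 */
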
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
			  m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	while (m->next != NULL)
		m = m->next;
	return m;
}

#define rte_pktmbuf_mtod_offset(m, t, o)	\
	((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

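/*
 * Illustrative sketch (not part of the original header): rte_pktmbuf_mtod()
 * is the usual way to get a typed pointer to the start of packet data, e.g.
 * to inspect the Ethernet header of a received frame (struct ether_hdr,
 * ETHER_TYPE_IPv4 and rte_cpu_to_be_16() come from other DPDK headers):
 *
 *   struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
 *
 *   if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
 *       handle_ipv4(m);   // hypothetical application function
 */
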
#define rte_pktmbuf_iova_offset(m, o) \
	(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
	rte_pktmbuf_iova_offset(m, o)

#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

#define rte_pktmbuf_data_len(m) ((m)->data_len)

static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
					uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	m->data_off -= len;
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len  = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len  = (m->pkt_len + len);
	return (char *)tail;
}

static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off += len;
	m->pkt_len  = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len  = (m->pkt_len - len);
	return 0;
}

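/*
 * Illustrative sketch (not part of the original header): prepend/adj work
 * at the front of the packet (headers), append/trim at the back (payload,
 * trailers). For example, pushing an encapsulation header in front of the
 * existing data (struct my_hdr is a hypothetical header type):
 *
 *   struct my_hdr *h = (struct my_hdr *)rte_pktmbuf_prepend(m, sizeof(*h));
 *
 *   if (h == NULL)
 *       goto no_headroom;   // not enough headroom in the first segment
 */
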
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return !!(m->nb_segs == 1);
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

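/*
 * Illustrative sketch (not part of the original header): rte_pktmbuf_read()
 * only copies into "buf" when the requested range straddles a segment
 * boundary; otherwise it returns a pointer into the mbuf itself, so the
 * returned pointer (not buf) must always be used. struct ipv4_hdr comes
 * from rte_ip.h:
 *
 *   struct ipv4_hdr hdr_buf;
 *   const struct ipv4_hdr *ip;
 *
 *   ip = rte_pktmbuf_read(m, sizeof(struct ether_hdr),
 *                         sizeof(hdr_buf), &hdr_buf);
 *   if (ip != NULL && ip->next_proto_id == IPPROTO_TCP)
 *       handle_tcp(m);   // hypothetical application function
 */
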
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length. */
	head->nb_segs += tail->nb_segs;
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

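/*
 * Illustrative sketch (not part of the original header): building a two
 * segment packet, e.g. a separately allocated header mbuf in front of a
 * shared payload mbuf:
 *
 *   if (rte_pktmbuf_chain(hdr_mbuf, payload_mbuf) != 0) {
 *       rte_pktmbuf_free(hdr_mbuf);      // would exceed RTE_MBUF_MAX_NB_SEGS
 *       rte_pktmbuf_free(payload_mbuf);
 *   }
 *   // on success, payload_mbuf must no longer be freed separately
 */
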
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;
	uint64_t inner_l3_offset = m->l2_len;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
		return 0;

	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;

	/* Headers are fragmented */
	if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
		return -ENOTSUP;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
		if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & PKT_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
				((ol_flags & PKT_TX_IPV4) &&
				!(ol_flags & PKT_TX_IP_CKSUM)))
			return -EINVAL;

	/* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
			!(ol_flags & PKT_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	int seg_len, copy_len;
	struct rte_mbuf *m;
	struct rte_mbuf *m_next;
	char *buffer;

	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;

	/* Extend first segment to the total packet length */
	copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

	if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
		return -1;

	buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
	mbuf->data_len = (uint16_t)(mbuf->pkt_len);

	/* Append data from next segments to the first one */
	m = mbuf->next;
	while (m != NULL) {
		m_next = m->next;

		seg_len = rte_pktmbuf_data_len(m);
		rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
		buffer += seg_len;

		rte_pktmbuf_free_seg(m);
		m = m_next;
	}

	mbuf->next = NULL;
	mbuf->nb_segs = 1;

	return 0;
}

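/*
 * Illustrative sketch (not part of the original header): some consumers
 * (e.g. a library expecting contiguous data) require a single segment;
 * linearizing succeeds only if the first segment has enough tailroom for
 * the whole packet:
 *
 *   if (rte_pktmbuf_linearize(m) < 0) {
 *       rte_pktmbuf_free(m);   // not enough tailroom in the first segment
 *       return;
 *   }
 *   process_contiguous(rte_pktmbuf_mtod(m, void *), m->pkt_len);
 *   // process_contiguous() is a hypothetical application function
 */
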
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */