#define RTE_ETHDEV_HAS_LRO_SUPPORT

#include <rte_compat.h>
#include <rte_config.h>
#include "rte_dev_info.h"

/* Device supported link speeds, bitmap flags (rte_eth_conf.link_speeds). */
#define ETH_LINK_SPEED_AUTONEG  (0 <<  0)
#define ETH_LINK_SPEED_FIXED    (1 <<  0)
#define ETH_LINK_SPEED_10M_HD   (1 <<  1)
#define ETH_LINK_SPEED_10M      (1 <<  2)
#define ETH_LINK_SPEED_100M_HD  (1 <<  3)
#define ETH_LINK_SPEED_100M     (1 <<  4)
#define ETH_LINK_SPEED_1G       (1 <<  5)
#define ETH_LINK_SPEED_2_5G     (1 <<  6)
#define ETH_LINK_SPEED_5G       (1 <<  7)
#define ETH_LINK_SPEED_10G      (1 <<  8)
#define ETH_LINK_SPEED_20G      (1 <<  9)
#define ETH_LINK_SPEED_25G      (1 << 10)
#define ETH_LINK_SPEED_40G      (1 << 11)
#define ETH_LINK_SPEED_50G      (1 << 12)
#define ETH_LINK_SPEED_56G      (1 << 13)
#define ETH_LINK_SPEED_100G     (1 << 14)

/* Numerical link speeds in Mbps (rte_eth_link.link_speed). */
#define ETH_SPEED_NUM_NONE      0
#define ETH_SPEED_NUM_10M      10
#define ETH_SPEED_NUM_100M    100
#define ETH_SPEED_NUM_1G     1000
#define ETH_SPEED_NUM_2_5G   2500
#define ETH_SPEED_NUM_5G     5000
#define ETH_SPEED_NUM_10G   10000
#define ETH_SPEED_NUM_20G   20000
#define ETH_SPEED_NUM_25G   25000
#define ETH_SPEED_NUM_40G   40000
#define ETH_SPEED_NUM_50G   50000
#define ETH_SPEED_NUM_56G   56000
#define ETH_SPEED_NUM_100G 100000

struct rte_eth_link {
	/* ... */
} __attribute__((aligned(8)));
/* Link duplex, status and autonegotiation values (struct rte_eth_link). */
#define ETH_LINK_HALF_DUPLEX 0
#define ETH_LINK_FULL_DUPLEX 1
#define ETH_LINK_DOWN        0
#define ETH_LINK_UP          1
#define ETH_LINK_FIXED       0
#define ETH_LINK_AUTONEG     1

/* Ring prefetch/host/write-back threshold parameters of an RX/TX queue. */
struct rte_eth_thresh {
	/* ... */
};

/* Simple flags to hint the RX multi-queue mode. */
#define ETH_MQ_RX_RSS_FLAG  0x1
#define ETH_MQ_RX_DCB_FLAG  0x2
#define ETH_MQ_RX_VMDQ_FLAG 0x4

/* Aliases kept for backward compatibility. */
#define ETH_RSS         ETH_MQ_RX_RSS
#define VMDQ_DCB        ETH_MQ_RX_VMDQ_DCB
#define ETH_DCB_RX      ETH_MQ_RX_DCB
#define ETH_DCB_NONE    ETH_MQ_TX_NONE
#define ETH_VMDQ_DCB_TX ETH_MQ_TX_VMDQ_DCB
#define ETH_DCB_TX      ETH_MQ_TX_DCB

enum rte_vlan_type {
	ETH_VLAN_TYPE_UNKNOWN = 0,
	/* ... */
};
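For reference, a minimal sketch of how these link macros are typically consumed: the numeric speed comes back in rte_eth_link.link_speed, and the duplex and status fields are compared against the values above. The port number handling and output format are illustrative assumptions, not part of the header.

/* Illustrative only: print the link state of one port. */
#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link_status(uint16_t port_id)
{
	struct rte_eth_link link;

	/* Non-blocking query; use rte_eth_link_get() to wait for autoneg. */
	rte_eth_link_get_nowait(port_id, &link);

	if (link.link_status == ETH_LINK_UP)
		printf("Port %u: up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
	else
		printf("Port %u: down\n", port_id);
}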
#define ETH_RSS_IPV4               (1ULL << RTE_ETH_FLOW_IPV4)
#define ETH_RSS_FRAG_IPV4          (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
#define ETH_RSS_IPV6               (1ULL << RTE_ETH_FLOW_IPV6)
#define ETH_RSS_FRAG_IPV6          (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
#define ETH_RSS_L2_PAYLOAD         (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
#define ETH_RSS_IPV6_EX            (1ULL << RTE_ETH_FLOW_IPV6_EX)
#define ETH_RSS_IPV6_TCP_EX        (1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
#define ETH_RSS_IPV6_UDP_EX        (1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
#define ETH_RSS_PORT               (1ULL << RTE_ETH_FLOW_PORT)
#define ETH_RSS_VXLAN              (1ULL << RTE_ETH_FLOW_VXLAN)
#define ETH_RSS_GENEVE             (1ULL << RTE_ETH_FLOW_GENEVE)
#define ETH_RSS_NVGRE              (1ULL << RTE_ETH_FLOW_NVGRE)

#define ETH_RSS_IP ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_IPV6_EX)

#define ETH_RSS_UDP ( \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_IPV6_UDP_EX)

#define ETH_RSS_TCP ( \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_IPV6_TCP_EX)

#define ETH_RSS_SCTP ( \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV6_SCTP)

#define ETH_RSS_TUNNEL ( \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

#define ETH_RSS_PROTO_MASK ( \
	ETH_RSS_IPV4 | \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_IPV6 | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD | \
	ETH_RSS_IPV6_EX | \
	ETH_RSS_IPV6_TCP_EX | \
	ETH_RSS_IPV6_UDP_EX | \
	ETH_RSS_PORT | \
	ETH_RSS_VXLAN | \
	ETH_RSS_GENEVE | \
	ETH_RSS_NVGRE)

/* Supported sizes of the RSS redirection table (RETA). */
#define ETH_RSS_RETA_SIZE_64  64
#define ETH_RSS_RETA_SIZE_128 128
#define ETH_RSS_RETA_SIZE_256 256
#define ETH_RSS_RETA_SIZE_512 512
#define RTE_RETA_GROUP_SIZE   64

/* VMDq and DCB limits. */
#define ETH_VMDQ_MAX_VLAN_FILTERS   64
#define ETH_DCB_NUM_USER_PRIORITIES 8
#define ETH_VMDQ_DCB_NUM_QUEUES     128
#define ETH_DCB_NUM_QUEUES          128

/* DCB capability flags. */
#define ETH_DCB_PG_SUPPORT  0x00000001
#define ETH_DCB_PFC_SUPPORT 0x00000002

/* VLAN offload and mask bits. */
#define ETH_VLAN_STRIP_OFFLOAD  0x0001
#define ETH_VLAN_FILTER_OFFLOAD 0x0002
#define ETH_VLAN_EXTEND_OFFLOAD 0x0004
#define ETH_VLAN_STRIP_MASK     0x0001
#define ETH_VLAN_FILTER_MASK    0x0002
#define ETH_VLAN_EXTEND_MASK    0x0004
#define ETH_VLAN_ID_MAX         0x0FFF

#define ETH_NUM_RECEIVE_MAC_ADDR   128
#define ETH_VMDQ_NUM_UC_HASH_ARRAY 128

/* VMDq RX filter modes. */
#define ETH_VMDQ_ACCEPT_UNTAG     0x0001
#define ETH_VMDQ_ACCEPT_HASH_MC   0x0002
#define ETH_VMDQ_ACCEPT_HASH_UC   0x0004
#define ETH_VMDQ_ACCEPT_BROADCAST 0x0008
#define ETH_VMDQ_ACCEPT_MULTICAST 0x0010

/* Mirroring rule types and limits. */
#define ETH_MIRROR_MAX_VLANS         64
#define ETH_MIRROR_VIRTUAL_POOL_UP   0x01
#define ETH_MIRROR_UPLINK_PORT       0x02
#define ETH_MIRROR_DOWNLINK_PORT     0x04
#define ETH_MIRROR_VLAN              0x08
#define ETH_MIRROR_VIRTUAL_POOL_DOWN 0x10

struct rte_eth_vlan_mirror {
	/* ... */
};

/* One group of 64 RSS redirection table entries. */
struct rte_eth_rss_reta_entry64 {
	uint64_t mask;
	uint16_t reta[RTE_RETA_GROUP_SIZE];
};
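As a usage sketch (not taken from the header), the RETA structures above are typically filled group by group and applied with rte_eth_dev_rss_reta_update(); the round-robin queue spreading and the upper size bound used here are arbitrary assumptions, and a real application would take reta_size from rte_eth_dev_info.

/* Illustrative only: spread a reta_size-entry RETA across nb_queues queues. */
#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
						   RTE_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > ETH_RSS_RETA_SIZE_512 || nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		struct rte_eth_rss_reta_entry64 *grp =
			&reta_conf[i / RTE_RETA_GROUP_SIZE];

		/* Mark the entry as valid and assign it a queue. */
		grp->mask |= 1ULL << (i % RTE_RETA_GROUP_SIZE);
		grp->reta[i % RTE_RETA_GROUP_SIZE] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}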
struct rte_eth_dcb_rx_conf {
	/* ... */
};

struct rte_eth_vmdq_dcb_tx_conf {
	/* ... */
};

struct rte_eth_dcb_tx_conf {
	/* ... */
};

struct rte_eth_vmdq_tx_conf {
	/* ... */
};
/* Bit flags for rte_eth_txconf.txq_flags. */
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001
#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002
#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004
#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100
#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200
#define ETH_TXQ_FLAGS_NOXSUMUDP  0x0400
#define ETH_TXQ_FLAGS_NOXSUMTCP  0x0800
#define ETH_TXQ_FLAGS_NOOFFLOADS \
	(ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
	 ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
#define ETH_TXQ_FLAGS_NOXSUMS \
	(ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
	 ETH_TXQ_FLAGS_NOXSUMTCP)
/* When set, txq_flags is ignored and per-queue Tx offloads are used instead. */
#define ETH_TXQ_FLAGS_IGNORE 0x8000

/* RX offload flags (rte_eth_rxmode.offloads, rte_eth_dev_info.rx_offload_capa). */
#define DEV_RX_OFFLOAD_VLAN_STRIP       0x00000001
#define DEV_RX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_RX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_RX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_RX_OFFLOAD_TCP_LRO          0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP       0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
#define DEV_RX_OFFLOAD_MACSEC_STRIP     0x00000080
#define DEV_RX_OFFLOAD_HEADER_SPLIT     0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER      0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND      0x00000400
#define DEV_RX_OFFLOAD_JUMBO_FRAME      0x00000800
#define DEV_RX_OFFLOAD_CRC_STRIP        0x00001000
#define DEV_RX_OFFLOAD_SCATTER          0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP        0x00004000
#define DEV_RX_OFFLOAD_SECURITY         0x00008000
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
				 DEV_RX_OFFLOAD_UDP_CKSUM | \
				 DEV_RX_OFFLOAD_TCP_CKSUM)
#define DEV_RX_OFFLOAD_VLAN (DEV_RX_OFFLOAD_VLAN_STRIP | \
			     DEV_RX_OFFLOAD_VLAN_FILTER | \
			     DEV_RX_OFFLOAD_VLAN_EXTEND)

/* TX offload flags (rte_eth_txconf.offloads, rte_eth_dev_info.tx_offload_capa). */
#define DEV_TX_OFFLOAD_VLAN_INSERT      0x00000001
#define DEV_TX_OFFLOAD_IPV4_CKSUM       0x00000002
#define DEV_TX_OFFLOAD_UDP_CKSUM        0x00000004
#define DEV_TX_OFFLOAD_TCP_CKSUM        0x00000008
#define DEV_TX_OFFLOAD_SCTP_CKSUM       0x00000010
#define DEV_TX_OFFLOAD_TCP_TSO          0x00000020
#define DEV_TX_OFFLOAD_UDP_TSO          0x00000040
#define DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000080
#define DEV_TX_OFFLOAD_QINQ_INSERT      0x00000100
#define DEV_TX_OFFLOAD_VXLAN_TNL_TSO    0x00000200
#define DEV_TX_OFFLOAD_GRE_TNL_TSO      0x00000400
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO     0x00000800
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO   0x00001000
#define DEV_TX_OFFLOAD_MACSEC_INSERT    0x00002000
#define DEV_TX_OFFLOAD_MT_LOCKFREE      0x00004000
#define DEV_TX_OFFLOAD_MULTI_SEGS       0x00008000
#define DEV_TX_OFFLOAD_MBUF_FAST_FREE   0x00010000
#define DEV_TX_OFFLOAD_SECURITY         0x00020000

struct rte_pci_device;

struct rte_eth_dev_info {
	/* ... */
	uint32_t max_hash_mac_addrs; /**< Maximum number of hash MAC addresses for MTA and UTA. */
	/* ... */
};
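A small sketch (not part of the header) of how these capability bits are typically checked before configuration: the desired offload set below is an arbitrary example, and real code would consult rte_eth_dev_info_get() before calling rte_eth_dev_configure().

/* Illustrative only: verify that a port supports the RX/TX offloads we want. */
#include <rte_ethdev.h>

static int
check_offload_caps(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	const uint64_t rx_wanted = DEV_RX_OFFLOAD_CHECKSUM;
	const uint64_t tx_wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
				   DEV_TX_OFFLOAD_TCP_CKSUM;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Reject the port if any requested capability bit is missing. */
	if ((dev_info.rx_offload_capa & rx_wanted) != rx_wanted)
		return -1;
	if ((dev_info.tx_offload_capa & tx_wanted) != tx_wanted)
		return -1;

	return 0;
}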
#define RTE_ETH_XSTATS_NAME_SIZE 64

#define ETH_DCB_NUM_TCS   8
#define ETH_MAX_VMDQ_POOL 64

/* Per-pool mapping of traffic classes to RX/TX queue ranges. */
struct rte_eth_dcb_tc_queue_mapping {
	struct {
		/* ... */
	} tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
	struct {
		/* ... */
	} tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS];
};
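Extended statistic names are bounded by RTE_ETH_XSTATS_NAME_SIZE. A rough usage sketch of the two-call pattern (size query, then fetch) follows; buffer sizing with calloc() and the bail-out error handling are simplified assumptions.

/* Illustrative only: dump all extended statistics of a port. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int n, i;

	/* A NULL/0 buffer makes the call return the number of counters. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, values, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[values[i].id].name, values[i].value);
out:
	free(names);
	free(values);
}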
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1

#define RTE_ETH_ALL RTE_MAX_ETHPORTS

/* Check for a valid port id and return (an error value) otherwise. */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return retval; \
	} \
} while (0)

#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
		return; \
	} \
} while (0)

/* L2 tunnel configuration masks. */
#define ETH_L2_TUNNEL_ENABLE_MASK     0x00000001
#define ETH_L2_TUNNEL_INSERTION_MASK  0x00000002
#define ETH_L2_TUNNEL_STRIPPING_MASK  0x00000004
#define ETH_L2_TUNNEL_FORWARDING_MASK 0x00000008

/* Callback prototypes for post-RX and pre-TX packet processing. */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);

typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
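By way of illustration, a post-RX callback with the rte_rx_callback_fn signature can be installed per queue with rte_eth_add_rx_callback(); the byte-counting logic below is a made-up example, not something the header prescribes.

/* Illustrative only: count received bytes on one RX queue via a callback. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
count_rx_bytes(uint16_t port_id __rte_unused, uint16_t queue __rte_unused,
	       struct rte_mbuf *pkts[], uint16_t nb_pkts,
	       uint16_t max_pkts __rte_unused, void *user_param)
{
	uint64_t *bytes = user_param;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		*bytes += rte_pktmbuf_pkt_len(pkts[i]);

	return nb_pkts; /* Pass the whole burst through unchanged. */
}

static uint64_t rx_bytes;

static int
install_rx_counter(uint16_t port_id, uint16_t queue_id)
{
	/* Returns an opaque handle for rte_eth_remove_rx_callback(), or NULL. */
	return rte_eth_add_rx_callback(port_id, queue_id,
				       count_rx_bytes, &rx_bytes) == NULL ? -1 : 0;
}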
/* Possible states of an ethdev port. */
enum rte_eth_dev_state {
	RTE_ETH_DEV_UNUSED = 0,
	RTE_ETH_DEV_ATTACHED,
	RTE_ETH_DEV_DEFERRED,
	RTE_ETH_DEV_REMOVED,
};

struct rte_eth_dev_sriov {
	/* ... */
	uint8_t nb_q_per_pool;
	uint16_t def_vmdq_idx;
	uint16_t def_pool_q_idx;
};
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0
#define RTE_ETH_MAX_OWNER_NAME_LEN 64

/* Port owner descriptor used by the (experimental) ownership API. */
struct rte_eth_dev_owner {
	uint64_t id;
	char name[RTE_ETH_MAX_OWNER_NAME_LEN];
};
/* Device capability/configuration flags. */
#define RTE_ETH_DEV_INTR_LSC     0x0002
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
#define RTE_ETH_DEV_INTR_RMV     0x0008

uint64_t __rte_experimental rte_eth_find_next_owned_by(uint16_t port_id,
		const uint64_t owner_id);
/* Iterate over ports owned by a specific owner, or over all unowned valid ports. */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)

int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id,
		const struct rte_eth_dev_owner *owner);

int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id,
		const uint64_t owner_id);

int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
		struct rte_eth_dev_owner *owner);
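For orientation, a sketch of how the experimental ownership API and the iteration macro combine: one application component claims ports so other components skip them. The owner name string is arbitrary, and error handling is simplified.

/* Illustrative only: claim every currently unowned port for this component. */
#include <stdio.h>
#include <rte_ethdev.h>

static int
claim_all_ports(uint64_t *owner_id_out)
{
	struct rte_eth_dev_owner owner;
	uint16_t port_id;
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id); /* Allocate a unique owner id. */
	if (ret != 0)
		return ret;

	snprintf(owner.name, sizeof(owner.name), "my-app");

	/* RTE_ETH_FOREACH_DEV() only visits ports not yet owned by anyone. */
	RTE_ETH_FOREACH_DEV(port_id) {
		ret = rte_eth_dev_owner_set(port_id, &owner);
		if (ret != 0)
			return ret;
	}

	*owner_id_out = owner.id;
	return 0;
}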
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);

int __rte_experimental
rte_eth_dev_is_removed(uint16_t port_id);

int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
		uint16_t nb_tx_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);
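Pulling these together, a bare-bones port bring-up sketch with one RX and one TX queue and default queue configuration; the mempool, descriptor counts and the zeroed rte_eth_conf are illustrative assumptions, not values mandated by the header.

/* Illustrative only: configure and start a port with 1 RX and 1 TX queue. */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
port_init(uint16_t port_id, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf;
	uint16_t nb_rxd = 1024, nb_txd = 1024;
	int ret;

	memset(&port_conf, 0, sizeof(port_conf));

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* Let the driver round the ring sizes to its supported limits. */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);
}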
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
		uint64_t *values, unsigned int size);

int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
		uint16_t tx_queue_id, uint8_t stat_idx);

int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
		uint16_t rx_queue_id, uint8_t stat_idx);

int rte_eth_dev_fw_version_get(uint16_t port_id,
		char *fw_version, size_t fw_size);

int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
		uint32_t *ptypes, int num);
/* Callback invoked for packets that could not be sent by a TX buffer flush. */
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);

/* Structure used to buffer packets for future TX; see rte_eth_tx_buffer(). */
struct rte_eth_dev_tx_buffer {
	buffer_tx_error_fn error_callback;
	void *error_userdata;
	/* ... */
};

/* Size in bytes needed to hold a TX buffer able to store "sz" packets. */
#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))

int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn callback, void *userdata);
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
		int epfd, int op, void *data);

int rte_eth_dev_rss_reta_update(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

int rte_eth_dev_rss_reta_query(uint16_t port_id,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);

struct rte_eth_rxtx_callback;

int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb);

int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
		struct ether_addr *mc_addr_set, uint32_t nb_mc_addr);

int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
		struct timespec *timestamp, uint32_t flags);

int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
		struct timespec *timestamp);

int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
		uint16_t *nb_rx_desc, uint16_t *nb_tx_desc);
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return 0;
	}
#endif
	int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
			rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
					nb_pkts, cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return nb_rx;
}
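As a consumer-side sketch (not from the header): a typical polling loop drains one RX queue in bursts and releases the mbufs; the burst size and the processing step are placeholders.

/* Illustrative only: poll one RX queue and drop everything it receives. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
rx_drain_loop(uint16_t port_id, uint16_t queue_id, volatile int *stop)
{
	struct rte_mbuf *bufs[BURST_SIZE];
	uint16_t nb_rx, i;

	while (!*stop) {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, bufs, BURST_SIZE);

		/* Process packets here; this sketch simply frees them. */
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(bufs[i]);
	}
}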
static inline int
rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
static inline int
rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
	return (*dev->dev_ops->rx_descriptor_done)( \
		dev->data->rx_queues[queue_id], offset);
}

/* Possible states returned by rte_eth_rx_descriptor_status(). */
#define RTE_ETH_RX_DESC_AVAIL   0 /**< Desc available for hw. */
#define RTE_ETH_RX_DESC_DONE    1 /**< Desc done, filled by hw. */
#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int
rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
	uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *rxq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
	rxq = dev->data->rx_queues[queue_id];

	return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
}
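A brief sketch of how the status values can be used, for instance to see whether the NIC has already filled a descriptor ahead of the software's current position; the probing offset is an arbitrary caller choice.

/* Illustrative only: report whether the RX descriptor "offset" slots ahead of
 * the next one to be processed has already been filled by hardware. */
#include <rte_ethdev.h>

static int
rx_ring_has_backlog(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	if (status < 0)
		return status; /* -ENODEV, -ENOTSUP or -EINVAL */

	return status == RTE_ETH_RX_DESC_DONE; /* 1 if hw already wrote it */
}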
/* Possible states returned by rte_eth_tx_descriptor_status(). */
#define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
#define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */

static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
	uint16_t queue_id, uint16_t offset)
{
	struct rte_eth_dev *dev;
	void *txq;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
#endif
	dev = &rte_eth_devices[port_id];
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;
#endif
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
	txq = dev->data->tx_queues[queue_id];

	return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
}
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];

	if (unlikely(cb != NULL)) {
		do {
			nb_pkts = cb->fn.tx(port_id, queue_id, tx_pkts, nb_pkts,
					cb->param);
			cb = cb->next;
		} while (cb != NULL);
	}
#endif

	return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
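On the sending side, a common pattern (sketched here, not prescribed by the header) is to transmit a burst and free whatever the driver did not accept, since unsent mbufs remain owned by the caller.

/* Illustrative only: send a burst and drop the packets that were not queued. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	if (unlikely(nb_tx < nb_pkts)) {
		uint16_t i;

		/* pkts[nb_tx .. nb_pkts-1] were not enqueued; free them. */
		for (i = nb_tx; i < nb_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
	}

	return nb_tx;
}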
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		rte_errno = -EINVAL;
		return 0;
	}
#endif

	if (!dev->tx_pkt_prepare)
		return nb_pkts;

	return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
			tx_pkts, nb_pkts);
}

#else

/* When RTE_ETHDEV_TX_PREPARE_NOOP is defined, tx_prepare is a pass-through. */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id, __rte_unused uint16_t queue_id,
		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}

#endif
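A usage sketch: when per-packet offloads such as TSO or checksum are requested, rte_eth_tx_prepare() is typically run on the burst right before rte_eth_tx_burst(); the drop-on-failure policy below is one possible choice, not the only one.

/* Illustrative only: validate/fix up a burst for offloads, then send it. */
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
prepare_and_send(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep, nb_tx, i;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep != nb_pkts) {
		/* pkts[nb_prep] is the first faulty packet; rte_errno says why. */
		for (i = nb_prep; i < nb_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
	}

	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	for (i = nb_tx; i < nb_prep; i++)
		rte_pktmbuf_free(pkts[i]);

	return nb_tx;
}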
static inline uint16_t
rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *buffer)
{
	uint16_t sent;
	uint16_t to_send = buffer->length;

	if (to_send == 0)
		return 0;

	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);

	buffer->length = 0;

	/* All packets sent, or to be dealt with by the error callback below. */
	if (unlikely(sent != to_send))
		buffer->error_callback(&buffer->pkts[sent],
				(uint16_t)(to_send - sent),
				buffer->error_userdata);

	return sent;
}
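To show how the pieces fit, a sketch of the buffered-TX pattern: allocate RTE_ETH_TX_BUFFER_SIZE() bytes, initialise the buffer, queue packets one at a time, and flush periodically. The buffer capacity and the rte_zmalloc_socket() allocation strategy are assumptions.

/* Illustrative only: buffered transmission of single packets. */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#define TXB_SIZE 32 /* packets per software TX buffer (arbitrary) */

static struct rte_eth_dev_tx_buffer *
txb_create(int socket_id)
{
	struct rte_eth_dev_tx_buffer *txb;

	txb = rte_zmalloc_socket("tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(TXB_SIZE), 0, socket_id);
	if (txb != NULL)
		rte_eth_tx_buffer_init(txb, TXB_SIZE);
	/* The default error callback (rte_eth_tx_buffer_drop_callback)
	 * frees packets that could not be sent. */
	return txb;
}

static void
txb_send(uint16_t port_id, uint16_t queue_id,
	 struct rte_eth_dev_tx_buffer *txb, struct rte_mbuf *pkt)
{
	/* Enqueues pkt; transmits automatically once the buffer is full. */
	rte_eth_tx_buffer(port_id, queue_id, txb, pkt);
}

static void
txb_flush(uint16_t port_id, uint16_t queue_id,
	  struct rte_eth_dev_tx_buffer *txb)
{
	/* Push out whatever is pending, e.g. on a timer tick. */
	rte_eth_tx_buffer_flush(port_id, queue_id, txb);
}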
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
uint8_t tc_bws[ETH_DCB_NUM_TCS]
struct rte_fdir_conf fdir_conf
int rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel)
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
rx_adv_conf (anonymous struct member of struct rte_eth_conf)
uint64_t __rte_experimental rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
void rte_eth_dev_stop(uint16_t port_id)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
void __rte_experimental rte_eth_dev_owner_delete(const uint64_t owner_id)
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
struct rte_eth_thresh rx_thresh
uint16_t rte_eth_find_next(uint16_t port_id)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
static int rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
const char *__rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload)
void rte_eth_dev_close(uint16_t port_id)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint16_t tx_rate)
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
uint64_t rx_queue_offload_capa
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
enum rte_fdir_status_mode status
enum rte_eth_tx_mq_mode mq_mode
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
uint64_t tx_queue_offload_capa
int __rte_experimental rte_eth_dev_is_removed(uint16_t port_id)
int rte_eth_mirror_rule_set(uint16_t port_id, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on)
uint8_t enable_default_pool
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
struct rte_eth_rss_conf rss_conf
void * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
uint32_t dcb_capability_en
__extension__ uint16_t hw_strip_crc
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
__extension__ uint16_t enable_scatter
pool_map[ETH_VMDQ_MAX_VLAN_FILTERS] (anonymous struct member of struct rte_eth_vmdq_dcb_conf)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__extension__ uint16_t header_split
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
void rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_dev_set_link_up(uint16_t port_id)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
struct rte_eth_thresh tx_thresh
struct rte_eth_desc_lim rx_desc_lim
uint16_t rte_eth_dev_count(void)
uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES]
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
uint8_t rx_deferred_start
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
struct rte_eth_rxmode rxmode
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
enum rte_eth_nb_pools nb_queue_pools
struct rte_eth_txconf conf
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
struct rte_intr_conf intr_conf
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
struct rte_eth_desc_lim tx_desc_lim
int rte_eth_timesync_disable(uint16_t port_id)
tx_adv_conf (anonymous union member of struct rte_eth_conf)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *mac_addr)
__extension__ uint16_t hw_vlan_strip
int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type, enum rte_filter_op filter_op, void *arg)
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxtx_callback *user_cb)
struct rte_pci_device * pci_dev
int rte_eth_stats_reset(uint16_t port_id)
struct rte_eth_txconf default_txconf
__extension__ uint16_t hw_vlan_filter
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
struct rte_eth_dcb_rx_conf dcb_rx_conf
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
char name[RTE_ETH_XSTATS_NAME_SIZE]
void rte_eth_promiscuous_enable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxtx_callback *user_cb)
enum rte_eth_rx_mq_mode mq_mode
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
enum rte_eth_nb_pools nb_queue_pools
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_start(uint16_t port_id)
int rte_eth_dev_reset(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
struct rte_eth_dcb_tx_conf dcb_tx_conf
void * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, uint8_t on)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
__extension__ uint16_t hw_timestamp
int rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
struct rte_eth_rxconf conf
void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
__extension__ uint16_t enable_lro
void rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
struct rte_eth_vlan_mirror vlan
const char *__rte_experimental rte_eth_dev_rx_offload_name(uint64_t offload)
void * rte_eth_dev_get_sec_ctx(uint8_t port_id)
__extension__ uint16_t jumbo_frame
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
__extension__ uint8_t hw_vlan_insert_pvid
uint64_t flow_type_rss_offloads
int __rte_experimental rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
uint8_t prio_tc[ETH_DCB_NUM_USER_PRIORITIES]
tc_rxq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS] (anonymous struct member of struct rte_eth_dcb_tc_queue_mapping)
struct rte_eth_dcb_tc_queue_mapping tc_queue
int rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id, struct rte_eth_l2_tunnel_conf *l2_tunnel, uint32_t mask, uint8_t en)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
__extension__ uint8_t hw_vlan_reject_untagged
struct rte_mempool * pool
int rte_eth_dev_detach(uint16_t port_id, char *devname)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *mac_addr, uint32_t pool)
__extension__ uint16_t hw_ip_checksum
void rte_eth_xstats_reset(uint16_t port_id)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *mac_addr)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
__extension__ uint16_t hw_vlan_extend
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_socket_id(uint16_t port_id)
uint8_t enable_default_pool
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
struct rte_eth_fdir_flex_conf flex_conf
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_filter_supported(uint16_t port_id, enum rte_filter_type filter_type)
int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
int rte_eth_promiscuous_get(uint16_t port_id)
int rte_eth_led_on(uint16_t port_id)
enum rte_fdir_pballoc_type pballoc
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
int __rte_experimental rte_eth_dev_owner_new(uint64_t *owner_id)
__extension__ uint8_t hw_vlan_reject_tagged
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
void rte_eth_allmulticast_disable(uint16_t port_id)
uint8_t mac_ctrl_frame_fwd
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
__extension__ uint16_t ignore_offload_bitfield
enum rte_eth_fc_mode mode
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
__extension__ uint16_t security
uint8_t tx_deferred_start
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
struct rte_eth_fc_conf fc
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
struct rte_eth_txmode txmode
int rte_eth_allmulticast_get(uint16_t port_id)
pool_map[ETH_VMDQ_MAX_VLAN_FILTERS] (anonymous struct member of struct rte_eth_vmdq_rx_conf)
int rte_eth_dev_is_valid_port(uint16_t port_id)
tc_txq[ETH_MAX_VMDQ_POOL][ETH_DCB_NUM_TCS] (anonymous struct member of struct rte_eth_dcb_tc_queue_mapping)
int rte_eth_timesync_enable(uint16_t port_id)
void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int __rte_experimental rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
struct rte_eth_rxconf default_rxconf
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_dev_set_link_down(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
void * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
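Finally, a small sketch exercising a few of the control-path calls listed above (port validation, MAC query, basic counters); the output format is arbitrary and only illustrates the call pattern.

/* Illustrative only: print the MAC address and basic counters of a port. */
#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_ether.h>

static void
show_port(uint16_t port_id)
{
	struct ether_addr mac;
	struct rte_eth_stats stats;

	if (!rte_eth_dev_is_valid_port(port_id))
		return;

	rte_eth_macaddr_get(port_id, &mac);
	printf("Port %u: %02x:%02x:%02x:%02x:%02x:%02x\n", port_id,
	       mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
	       mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);

	if (rte_eth_stats_get(port_id, &stats) == 0)
		printf("  rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64 "\n",
		       stats.ipackets, stats.opackets, stats.imissed);
}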