DPDK 23.11.2
Loading...
Searching...
No Matches
rte_ethdev.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#ifndef _RTE_ETHDEV_H_
6#define _RTE_ETHDEV_H_
7
148#ifdef __cplusplus
149extern "C" {
150#endif
151
152#include <stdint.h>
153
154/* Use this macro to check if LRO API is supported */
155#define RTE_ETHDEV_HAS_LRO_SUPPORT
156
157/* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158#ifdef RTE_LIBRTE_ETHDEV_DEBUG
159#define RTE_ETHDEV_DEBUG_RX
160#define RTE_ETHDEV_DEBUG_TX
161#endif
162
163#include <rte_cman.h>
164#include <rte_compat.h>
165#include <rte_log.h>
166#include <rte_interrupts.h>
167#include <rte_dev.h>
168#include <rte_devargs.h>
169#include <rte_bitops.h>
170#include <rte_errno.h>
171#include <rte_common.h>
172#include <rte_config.h>
173#include <rte_power_intrinsics.h>
174
175#include "rte_ethdev_trace_fp.h"
176#include "rte_dev_info.h"
177
178extern int rte_eth_dev_logtype;
179
/**
 * Log a message with the ethdev log type.
 *
 * The empty string literal "" is concatenated with the first variadic
 * argument, which forces callers to pass a string-literal format string
 * (a non-literal first argument is a compile error).
 */
#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182
183struct rte_mbuf;
184
201int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202
218
232
/**
 * Iterate over Ethernet ports matching a devargs filter string.
 *
 * @param id
 *   uint16_t lvalue assigned each matching port identifier in turn.
 * @param devargs
 *   Device arguments string passed to rte_eth_iterator_init().
 * @param iter
 *   Pointer to a struct rte_dev_iterator used as the loop state.
 *
 * The loop terminates when the iterator yields RTE_MAX_ETHPORTS.
 */
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
251
262 uint64_t ipackets;
263 uint64_t opackets;
264 uint64_t ibytes;
265 uint64_t obytes;
270 uint64_t imissed;
271 uint64_t ierrors;
272 uint64_t oerrors;
273 uint64_t rx_nombuf;
274 /* Queue stats are limited to max 256 queues */
276 uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278 uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280 uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284 uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285};
286
290#define RTE_ETH_LINK_SPEED_AUTONEG 0
291#define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292#define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293#define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295#define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296#define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297#define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298#define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299#define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300#define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301#define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302#define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303#define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304#define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305#define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306#define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
307#define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
313#define RTE_ETH_SPEED_NUM_NONE 0
314#define RTE_ETH_SPEED_NUM_10M 10
315#define RTE_ETH_SPEED_NUM_100M 100
316#define RTE_ETH_SPEED_NUM_1G 1000
317#define RTE_ETH_SPEED_NUM_2_5G 2500
318#define RTE_ETH_SPEED_NUM_5G 5000
319#define RTE_ETH_SPEED_NUM_10G 10000
320#define RTE_ETH_SPEED_NUM_20G 20000
321#define RTE_ETH_SPEED_NUM_25G 25000
322#define RTE_ETH_SPEED_NUM_40G 40000
323#define RTE_ETH_SPEED_NUM_50G 50000
324#define RTE_ETH_SPEED_NUM_56G 56000
325#define RTE_ETH_SPEED_NUM_100G 100000
326#define RTE_ETH_SPEED_NUM_200G 200000
327#define RTE_ETH_SPEED_NUM_400G 400000
328#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
335 union {
336 RTE_ATOMIC(uint64_t) val64;
337 __extension__
338 struct {
339 uint32_t link_speed;
340 uint16_t link_duplex : 1;
341 uint16_t link_autoneg : 1;
342 uint16_t link_status : 1;
343 };
344 };
345};
346
350#define RTE_ETH_LINK_HALF_DUPLEX 0
351#define RTE_ETH_LINK_FULL_DUPLEX 1
352#define RTE_ETH_LINK_DOWN 0
353#define RTE_ETH_LINK_UP 1
354#define RTE_ETH_LINK_FIXED 0
355#define RTE_ETH_LINK_AUTONEG 1
356#define RTE_ETH_LINK_MAX_STR_LEN 40
364 uint8_t pthresh;
365 uint8_t hthresh;
366 uint8_t wthresh;
367};
368
372#define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
373#define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
374#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
402
413
420 uint32_t mtu;
428 uint64_t offloads;
429
430 uint64_t reserved_64s[2];
431 void *reserved_ptrs[2];
432};
433
439 RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
442 RTE_ETH_VLAN_TYPE_MAX,
443};
444
450 uint64_t ids[64];
451};
452
476
477#define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
478#define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
479
506
507/*
508 * A packet can be identified by hardware as different flow types. Different
509 * NIC hardware may support different flow types.
510 * Basically, the NIC hardware identifies the flow type as deep protocol as
511 * possible, and exclusively. For example, if a packet is identified as
512 * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
513 * though it is an actual IPV4 packet.
514 */
515#define RTE_ETH_FLOW_UNKNOWN 0
516#define RTE_ETH_FLOW_RAW 1
517#define RTE_ETH_FLOW_IPV4 2
518#define RTE_ETH_FLOW_FRAG_IPV4 3
519#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
520#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
521#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
522#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
523#define RTE_ETH_FLOW_IPV6 8
524#define RTE_ETH_FLOW_FRAG_IPV6 9
525#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
526#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
527#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
528#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
529#define RTE_ETH_FLOW_L2_PAYLOAD 14
530#define RTE_ETH_FLOW_IPV6_EX 15
531#define RTE_ETH_FLOW_IPV6_TCP_EX 16
532#define RTE_ETH_FLOW_IPV6_UDP_EX 17
534#define RTE_ETH_FLOW_PORT 18
535#define RTE_ETH_FLOW_VXLAN 19
536#define RTE_ETH_FLOW_GENEVE 20
537#define RTE_ETH_FLOW_NVGRE 21
538#define RTE_ETH_FLOW_VXLAN_GPE 22
539#define RTE_ETH_FLOW_GTPU 23
540#define RTE_ETH_FLOW_MAX 24
541
542/*
543 * Below macros are defined for RSS offload types, they can be used to
544 * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
545 */
546#define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
547#define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
548#define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
549#define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
550#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
551#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
552#define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
553#define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
554#define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
555#define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
556#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
557#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
558#define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
559#define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
560#define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
561#define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
562#define RTE_ETH_RSS_PORT RTE_BIT64(18)
563#define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
564#define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
565#define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
566#define RTE_ETH_RSS_GTPU RTE_BIT64(23)
567#define RTE_ETH_RSS_ETH RTE_BIT64(24)
568#define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
569#define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
570#define RTE_ETH_RSS_ESP RTE_BIT64(27)
571#define RTE_ETH_RSS_AH RTE_BIT64(28)
572#define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
573#define RTE_ETH_RSS_PFCP RTE_BIT64(30)
574#define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
575#define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
576#define RTE_ETH_RSS_MPLS RTE_BIT64(33)
577#define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
578
591#define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
592
593#define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
594
595/*
596 * We use the following macros to combine with above RTE_ETH_RSS_* for
597 * more specific input set selection. These bits are defined starting
598 * from the high end of the 64 bits.
599 * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
600 * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
601 * the same level are used simultaneously, it is the same case as none of
602 * them are added.
603 */
604#define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
605#define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
606#define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
607#define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
608#define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
609#define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
610
611/*
612 * Only select IPV6 address prefix as RSS input set according to
613 * https://tools.ietf.org/html/rfc6052
614 * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
615 * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
616 */
617#define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
618#define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
619#define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
620#define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
621#define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
622#define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
623
624/*
625 * Use the following macros to combine with the above layers
626 * to choose inner and outer layers or both for RSS computation.
627 * Bits 50 and 51 are reserved for this.
628 */
629
/** Let the PMD pick the encapsulation level for RSS (bits 50-51 == 0). */
#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT	(UINT64_C(0) << 50)

/** Compute RSS over the outermost encapsulation layer. */
#define RTE_ETH_RSS_LEVEL_OUTERMOST	(UINT64_C(1) << 50)

/** Compute RSS over the innermost encapsulation layer. */
#define RTE_ETH_RSS_LEVEL_INNERMOST	(UINT64_C(2) << 50)
/** Mask covering the two RSS level selection bits (50-51). */
#define RTE_ETH_RSS_LEVEL_MASK		(UINT64_C(3) << 50)

/**
 * Extract the RSS level (0..3) from an rss_hf bitmap.
 *
 * The argument is parenthesized so that expressions built with operators
 * of lower precedence than '&' (e.g. 'a | b') are masked as a whole;
 * without the parentheses, RTE_ETH_RSS_LEVEL(a | b) would expand to
 * a | (b & MASK), silently keeping unrelated high bits of 'a'.
 */
#define RTE_ETH_RSS_LEVEL(rss_hf) (((rss_hf) & RTE_ETH_RSS_LEVEL_MASK) >> 50)
653
664static inline uint64_t
665rte_eth_rss_hf_refine(uint64_t rss_hf)
666{
667 if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
668 rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
669
670 if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
671 rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
672
673 return rss_hf;
674}
675
676#define RTE_ETH_RSS_IPV6_PRE32 ( \
677 RTE_ETH_RSS_IPV6 | \
678 RTE_ETH_RSS_L3_PRE32)
679
680#define RTE_ETH_RSS_IPV6_PRE40 ( \
681 RTE_ETH_RSS_IPV6 | \
682 RTE_ETH_RSS_L3_PRE40)
683
684#define RTE_ETH_RSS_IPV6_PRE48 ( \
685 RTE_ETH_RSS_IPV6 | \
686 RTE_ETH_RSS_L3_PRE48)
687
688#define RTE_ETH_RSS_IPV6_PRE56 ( \
689 RTE_ETH_RSS_IPV6 | \
690 RTE_ETH_RSS_L3_PRE56)
691
692#define RTE_ETH_RSS_IPV6_PRE64 ( \
693 RTE_ETH_RSS_IPV6 | \
694 RTE_ETH_RSS_L3_PRE64)
695
696#define RTE_ETH_RSS_IPV6_PRE96 ( \
697 RTE_ETH_RSS_IPV6 | \
698 RTE_ETH_RSS_L3_PRE96)
699
700#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
701 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
702 RTE_ETH_RSS_L3_PRE32)
703
704#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
705 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
706 RTE_ETH_RSS_L3_PRE40)
707
708#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
709 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
710 RTE_ETH_RSS_L3_PRE48)
711
712#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
713 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
714 RTE_ETH_RSS_L3_PRE56)
715
716#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
717 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
718 RTE_ETH_RSS_L3_PRE64)
719
720#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
721 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
722 RTE_ETH_RSS_L3_PRE96)
723
724#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
725 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
726 RTE_ETH_RSS_L3_PRE32)
727
728#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
729 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
730 RTE_ETH_RSS_L3_PRE40)
731
732#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
733 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
734 RTE_ETH_RSS_L3_PRE48)
735
736#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
737 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
738 RTE_ETH_RSS_L3_PRE56)
739
740#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
741 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
742 RTE_ETH_RSS_L3_PRE64)
743
744#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
745 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
746 RTE_ETH_RSS_L3_PRE96)
747
748#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
749 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
750 RTE_ETH_RSS_L3_PRE32)
751
752#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
753 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
754 RTE_ETH_RSS_L3_PRE40)
755
756#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
757 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
758 RTE_ETH_RSS_L3_PRE48)
759
760#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
761 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
762 RTE_ETH_RSS_L3_PRE56)
763
764#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
765 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
766 RTE_ETH_RSS_L3_PRE64)
767
768#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
769 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
770 RTE_ETH_RSS_L3_PRE96)
771
772#define RTE_ETH_RSS_IP ( \
773 RTE_ETH_RSS_IPV4 | \
774 RTE_ETH_RSS_FRAG_IPV4 | \
775 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
776 RTE_ETH_RSS_IPV6 | \
777 RTE_ETH_RSS_FRAG_IPV6 | \
778 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
779 RTE_ETH_RSS_IPV6_EX)
780
781#define RTE_ETH_RSS_UDP ( \
782 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
783 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
784 RTE_ETH_RSS_IPV6_UDP_EX)
785
786#define RTE_ETH_RSS_TCP ( \
787 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
788 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
789 RTE_ETH_RSS_IPV6_TCP_EX)
790
791#define RTE_ETH_RSS_SCTP ( \
792 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
793 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
794
795#define RTE_ETH_RSS_TUNNEL ( \
796 RTE_ETH_RSS_VXLAN | \
797 RTE_ETH_RSS_GENEVE | \
798 RTE_ETH_RSS_NVGRE)
799
800#define RTE_ETH_RSS_VLAN ( \
801 RTE_ETH_RSS_S_VLAN | \
802 RTE_ETH_RSS_C_VLAN)
803
805#define RTE_ETH_RSS_PROTO_MASK ( \
806 RTE_ETH_RSS_IPV4 | \
807 RTE_ETH_RSS_FRAG_IPV4 | \
808 RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
809 RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
810 RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
811 RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
812 RTE_ETH_RSS_IPV6 | \
813 RTE_ETH_RSS_FRAG_IPV6 | \
814 RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
815 RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
816 RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
817 RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
818 RTE_ETH_RSS_L2_PAYLOAD | \
819 RTE_ETH_RSS_IPV6_EX | \
820 RTE_ETH_RSS_IPV6_TCP_EX | \
821 RTE_ETH_RSS_IPV6_UDP_EX | \
822 RTE_ETH_RSS_PORT | \
823 RTE_ETH_RSS_VXLAN | \
824 RTE_ETH_RSS_GENEVE | \
825 RTE_ETH_RSS_NVGRE | \
826 RTE_ETH_RSS_MPLS)
827
828/*
829 * Definitions used for redirection table entry size.
830 * Some RSS RETA sizes may not be supported by some drivers, check the
831 * documentation or the description of relevant functions for more details.
832 */
833#define RTE_ETH_RSS_RETA_SIZE_64 64
834#define RTE_ETH_RSS_RETA_SIZE_128 128
835#define RTE_ETH_RSS_RETA_SIZE_256 256
836#define RTE_ETH_RSS_RETA_SIZE_512 512
837#define RTE_ETH_RETA_GROUP_SIZE 64
838
840#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
841#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
842#define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
843#define RTE_ETH_DCB_NUM_QUEUES 128
847#define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
848#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
852#define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
853#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
854#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
855#define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
857#define RTE_ETH_VLAN_STRIP_MASK 0x0001
858#define RTE_ETH_VLAN_FILTER_MASK 0x0002
859#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
860#define RTE_ETH_QINQ_STRIP_MASK 0x0008
861#define RTE_ETH_VLAN_ID_MAX 0x0FFF
864/* Definitions used for receive MAC address */
865#define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
867/* Definitions used for unicast hash */
868#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
874#define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
876#define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
878#define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
880#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
882#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
893 uint64_t mask;
895 uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
896};
897
906
917
918/* This structure may be extended in future. */
/** DCB receive configuration. This structure may be extended in future. */
struct rte_eth_dcb_rx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB traffic classes on Rx. */
	/** Traffic class each of the 8 user priorities maps to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
924
/** VMDq+DCB transmit configuration. */
struct rte_eth_vmdq_dcb_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq Tx pools: 16 or 32. */
	/** Traffic class each of the 8 user priorities maps to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
930
/** DCB transmit configuration. */
struct rte_eth_dcb_tx_conf {
	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB traffic classes on Tx. */
	/** Traffic class each of the 8 user priorities maps to. */
	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
};
936
/** VMDq (without DCB) transmit configuration. */
struct rte_eth_vmdq_tx_conf {
	enum rte_eth_nb_pools nb_queue_pools; /**< Number of VMDq Tx pools. */
};
940
964
995
1006 uint64_t offloads;
1007
1008 uint16_t pvid;
1009 __extension__
1010 uint8_t
1016
1017 uint64_t reserved_64s[2];
1018 void *reserved_ptrs[2];
1019};
1020
1082 struct rte_mempool *mp;
1083 uint16_t length;
1084 uint16_t offset;
1096 uint32_t proto_hdr;
1097};
1098
1106 /* The settings for buffer split offload. */
1107 struct rte_eth_rxseg_split split;
1108 /* The other features settings should be added here. */
1109};
1110
1117 uint8_t rx_drop_en;
1119 uint16_t rx_nseg;
1126 uint16_t share_group;
1127 uint16_t share_qid;
1133 uint64_t offloads;
1142
1163 uint16_t rx_nmempool;
1165 uint64_t reserved_64s[2];
1166 void *reserved_ptrs[2];
1167};
1168
1189
1202
1207 uint32_t rte_memory:1;
1208
1209 uint32_t reserved:30;
1210};
1211
1229
1230#define RTE_ETH_MAX_HAIRPIN_PEERS 32
1231
1239 uint16_t port;
1240 uint16_t queue;
1241};
1242
1250 uint32_t peer_count:16;
1261 uint32_t tx_explicit:1;
1262
1274 uint32_t manual_bind:1;
1275
1288
1300 uint32_t use_rte_memory:1;
1301
1312 uint32_t force_memory:1;
1313
1314 uint32_t reserved:11;
1316 struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1317};
1318
1323 uint16_t nb_max;
1324 uint16_t nb_min;
1325 uint16_t nb_align;
1335 uint16_t nb_seg_max;
1336
1349};
1350
1360
1375
1385
1400
1421 struct {
1422 uint16_t tx_qid;
1426 uint8_t tc;
1427 } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1428
1429 struct {
1430 uint16_t pause_time;
1431 uint16_t rx_qid;
1435 uint8_t tc;
1436 } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1437};
1438
1444 RTE_ETH_TUNNEL_TYPE_NONE = 0,
1445 RTE_ETH_TUNNEL_TYPE_VXLAN,
1446 RTE_ETH_TUNNEL_TYPE_GENEVE,
1447 RTE_ETH_TUNNEL_TYPE_TEREDO,
1448 RTE_ETH_TUNNEL_TYPE_NVGRE,
1449 RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1450 RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1451 RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1452 RTE_ETH_TUNNEL_TYPE_ECPRI,
1453 RTE_ETH_TUNNEL_TYPE_MAX,
1454};
1455
1456/* Deprecated API file for rte_eth_dev_filter_* functions */
1457#include "rte_eth_ctrl.h"
1458
1469 uint16_t udp_port;
1470 uint8_t prot_type;
1471};
1472
1478 uint32_t lsc:1;
1480 uint32_t rxq:1;
1482 uint32_t rmv:1;
1483};
1484
1485#define rte_intr_conf rte_eth_intr_conf
1486
1493 uint32_t link_speeds;
1502 uint32_t lpbk_mode;
1507 struct {
1508 struct rte_eth_rss_conf rss_conf;
1510 struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1512 struct rte_eth_dcb_rx_conf dcb_rx_conf;
1514 struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1516 union {
1518 struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1520 struct rte_eth_dcb_tx_conf dcb_tx_conf;
1522 struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1528};
1529
1533#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1534#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1535#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1536#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1537#define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1538#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1539#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1540#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1541#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1542#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1543#define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1549#define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1550#define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1551#define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1552#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1553#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1554#define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1555#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1556
1557#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1558 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1559 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1560#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1561 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1562 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1563 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1564
1565/*
1566 * If new Rx offload capabilities are defined, they also must be
1567 * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1568 */
1569
1573#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1574#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1575#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1576#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1577#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1578#define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1579#define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1580#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1581#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1582#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1583#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1584#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1585#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1586#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1591#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1593#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1599#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1600#define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1606#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1612#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1614#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1620#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1621/*
1622 * If new Tx offload capabilities are defined, they also must be
1623 * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1624 */
1625
1630#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1632#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1642#define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1644#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1646#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1649/*
1650 * Fallback default preferred Rx/Tx port parameters.
1651 * These are used if an application requests default parameters
1652 * but the PMD does not provide preferred values.
1653 */
1654#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1655#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1656#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1657#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1658
1665 uint16_t burst_size;
1666 uint16_t ring_size;
1667 uint16_t nb_queues;
1668};
1669
1674#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1675
1680 const char *name;
1681 uint16_t domain_id;
1689 uint16_t port_id;
1695 uint16_t rx_domain;
1696};
1697
1705 __extension__
1706 uint32_t multi_pools:1;
1707 uint32_t offset_allowed:1;
1709 uint16_t max_nseg;
1710 uint16_t reserved;
1711};
1712
1726
1748
1831
1833#define RTE_ETH_QUEUE_STATE_STOPPED 0
1834#define RTE_ETH_QUEUE_STATE_STARTED 1
1835#define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1857
1867
1891
1892/* Generic Burst mode flag definition, values can be ORed. */
1893
1899#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1900
1906 uint64_t flags;
1908#define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1910};
1911
1913#define RTE_ETH_XSTATS_NAME_SIZE 64
1914
1925 uint64_t id;
1926 uint64_t value;
1927};
1928
1946
1947#define RTE_ETH_DCB_NUM_TCS 8
1948#define RTE_ETH_MAX_VMDQ_POOL 64
1949
1956 struct {
1957 uint16_t base;
1958 uint16_t nb_queue;
1959 } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1961 struct {
1962 uint16_t base;
1963 uint16_t nb_queue;
1964 } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1965};
1966
1978
1990
1991/* Translate from FEC mode to FEC capa */
1992#define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1993
1994/* This macro indicates FEC capa mask */
1995#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1996
1997/* A structure used to get capabilities per link speed */
struct rte_eth_fec_capa {
	uint32_t speed; /**< Link speed (one of RTE_ETH_SPEED_NUM_*). */
	uint32_t capa;  /**< FEC capability bitmask (see RTE_ETH_FEC_MODE_TO_CAPA). */
};
2002
2003#define RTE_ETH_ALL RTE_MAX_ETHPORTS
2004
2005/* Macros to check for valid port */
/**
 * Validate @p port_id; on failure log an error and return @p retval
 * from the calling function.
 */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)
2012
/**
 * Validate @p port_id; on failure log an error and return (void)
 * from the calling function.
 */
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
2019
/**
 * Function type for user callbacks run on a burst of received packets.
 *
 * @param port_id    Port the packets were received on.
 * @param queue      Rx queue index on that port.
 * @param pkts       Array holding the burst of received mbufs.
 * @param nb_pkts    Number of packets currently in @p pkts.
 * @param max_pkts   Capacity of the @p pkts array.
 * @param user_param Opaque pointer supplied when the callback was registered.
 * @return Number of packets handed back to the application.
 */
typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
	void *user_param);
2045
/**
 * Function type for user callbacks run on a burst of packets before transmit.
 *
 * @param port_id    Port the packets will be sent on.
 * @param queue      Tx queue index on that port.
 * @param pkts       Array holding the burst of mbufs to transmit.
 * @param nb_pkts    Number of packets in @p pkts.
 * @param user_param Opaque pointer supplied when the callback was registered.
 * @return Number of packets to pass on to the driver for transmission.
 */
typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2068
2080
/** SR-IOV bookkeeping for an Ethernet device. */
struct rte_eth_dev_sriov {
	uint8_t active;          /**< Non-zero when SR-IOV is enabled. */
	uint8_t nb_q_per_pool;   /**< Rx/Tx queue count per pool. */
	uint16_t def_vmdq_idx;   /**< Default VMDq pool index used by the PF. */
	uint16_t def_pool_q_idx; /**< Queue start index of the default pool. */
};
2087#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2088
2089#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2090
2091#define RTE_ETH_DEV_NO_OWNER 0
2092
2093#define RTE_ETH_MAX_OWNER_NAME_LEN 64
2094
/** Identity of an Ethernet port owner (see rte_eth_dev_owner_*() API). */
struct rte_eth_dev_owner {
	uint64_t id; /**< Owner unique identifier (RTE_ETH_DEV_NO_OWNER = none). */
	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< Human-readable owner name. */
};
2099
2105#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2107#define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2109#define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2111#define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2113#define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2115#define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2120#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2134uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2135 const uint64_t owner_id);
2136
/**
 * Iterate over all valid ports owned by owner id @p o, assigning each
 * port identifier to @p p in turn.
 *
 * The casts make the termination comparison well-defined regardless of
 * the (possibly wider) type of @p p, since rte_eth_find_next_owned_by()
 * returns a uint64_t.
 */
#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))
2144
2153uint16_t rte_eth_find_next(uint16_t port_id);
2154
2158#define RTE_ETH_FOREACH_DEV(p) \
2159 RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2160
2172uint16_t
2173rte_eth_find_next_of(uint16_t port_id_start,
2174 const struct rte_device *parent);
2175
/**
 * Iterate over all Ethernet ports belonging to the given parent
 * rte_device, assigning each matching port identifier to @p port_id.
 */
#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
	for (port_id = rte_eth_find_next_of(0, parent); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_of(port_id + 1, parent))
2188
2200uint16_t
2201rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2202
/**
 * Iterate over all Ethernet ports sharing the same underlying device as
 * @p ref_port_id (including @p ref_port_id itself), assigning each
 * sibling port identifier to @p port_id.
 */
#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2217
2228int rte_eth_dev_owner_new(uint64_t *owner_id);
2229
2240int rte_eth_dev_owner_set(const uint16_t port_id,
2241 const struct rte_eth_dev_owner *owner);
2242
2253int rte_eth_dev_owner_unset(const uint16_t port_id,
2254 const uint64_t owner_id);
2255
2264int rte_eth_dev_owner_delete(const uint64_t owner_id);
2265
2276int rte_eth_dev_owner_get(const uint16_t port_id,
2277 struct rte_eth_dev_owner *owner);
2278
2290
2300
2312uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2313
2322const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2323
2332const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2333
2345__rte_experimental
2346const char *rte_eth_dev_capability_name(uint64_t capability);
2347
2387int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2388 uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2389
2398int
2399rte_eth_dev_is_removed(uint16_t port_id);
2400
2463int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2464 uint16_t nb_rx_desc, unsigned int socket_id,
2465 const struct rte_eth_rxconf *rx_conf,
2466 struct rte_mempool *mb_pool);
2467
2495__rte_experimental
2497 (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2498 const struct rte_eth_hairpin_conf *conf);
2499
2548int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2549 uint16_t nb_tx_desc, unsigned int socket_id,
2550 const struct rte_eth_txconf *tx_conf);
2551
2577__rte_experimental
2579 (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2580 const struct rte_eth_hairpin_conf *conf);
2581
2608__rte_experimental
2609int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2610 size_t len, uint32_t direction);
2611
2634__rte_experimental
2635int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2636
2661__rte_experimental
2662int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2663
2679__rte_experimental
2680int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2681
2709__rte_experimental
2710int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2711 uint8_t affinity);
2712
2725int rte_eth_dev_socket_id(uint16_t port_id);
2726
2736int rte_eth_dev_is_valid_port(uint16_t port_id);
2737
2754__rte_experimental
2755int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2756
2773__rte_experimental
2774int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2775
2793int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2794
2811int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2812
2830int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2831
2848int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2849
2873int rte_eth_dev_start(uint16_t port_id);
2874
2888int rte_eth_dev_stop(uint16_t port_id);
2889
2902int rte_eth_dev_set_link_up(uint16_t port_id);
2903
2913int rte_eth_dev_set_link_down(uint16_t port_id);
2914
2925int rte_eth_dev_close(uint16_t port_id);
2926
2964int rte_eth_dev_reset(uint16_t port_id);
2965
2977int rte_eth_promiscuous_enable(uint16_t port_id);
2978
2990int rte_eth_promiscuous_disable(uint16_t port_id);
2991
3002int rte_eth_promiscuous_get(uint16_t port_id);
3003
3015int rte_eth_allmulticast_enable(uint16_t port_id);
3016
3028int rte_eth_allmulticast_disable(uint16_t port_id);
3029
3040int rte_eth_allmulticast_get(uint16_t port_id);
3041
3059int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3060
3075int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3076
3090__rte_experimental
3091const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3092
3111__rte_experimental
3112int rte_eth_link_to_str(char *str, size_t len,
3113 const struct rte_eth_link *eth_link);
3114
3132int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3133
3145int rte_eth_stats_reset(uint16_t port_id);
3146
3176int rte_eth_xstats_get_names(uint16_t port_id,
3177 struct rte_eth_xstat_name *xstats_names,
3178 unsigned int size);
3179
3213int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3214 unsigned int n);
3215
3240int
3242 struct rte_eth_xstat_name *xstats_names, unsigned int size,
3243 uint64_t *ids);
3244
3269int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3270 uint64_t *values, unsigned int size);
3271
3291int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3292 uint64_t *id);
3293
3306int rte_eth_xstats_reset(uint16_t port_id);
3307
3327 uint16_t tx_queue_id, uint8_t stat_idx);
3328
3348 uint16_t rx_queue_id,
3349 uint8_t stat_idx);
3350
3364int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3365
3386__rte_experimental
3387int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3388 unsigned int num);
3389
3409int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3410
3426__rte_experimental
3427int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3428
3449int rte_eth_dev_fw_version_get(uint16_t port_id,
3450 char *fw_version, size_t fw_size);
3451
3491int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3492 uint32_t *ptypes, int num);
3523int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3524 uint32_t *set_ptypes, unsigned int num);
3525
3538int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3539
3557int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3558
3578int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3579
3598int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3599 int on);
3600
3618 enum rte_vlan_type vlan_type,
3619 uint16_t tag_type);
3620
3638int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3639
3653int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3654
3669int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3670
3696__rte_experimental
3697int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3698 uint8_t avail_thresh);
3699
3726__rte_experimental
3727int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3728 uint8_t *avail_thresh);
3729
/*
 * Callback invoked from rte_eth_tx_buffer_flush() for packets the PMD
 * could not send: receives the array of unsent mbufs, their count, and
 * the opaque user data registered with the buffer.
 */
3730typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3731 void *userdata);
3732
/* NOTE(review): the struct header line ("struct rte_eth_dev_tx_buffer {")
 * was stripped by the doc extraction; the fields below belong to it. */
3738 buffer_tx_error_fn error_callback; /* called with packets left unsent by flush */
3739 void *error_userdata; /* opaque argument passed to error_callback */
3740 uint16_t size; /* capacity of pkts[]; flush triggers when length reaches it */
3741 uint16_t length; /* number of packets currently buffered */
3743 struct rte_mbuf *pkts[]; /* flexible array; sized via RTE_ETH_TX_BUFFER_SIZE(sz) */
3744};
3745
3752#define RTE_ETH_TX_BUFFER_SIZE(sz) \
3753 (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3754
3765int
3766rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3767
3792int
3794 buffer_tx_error_fn callback, void *userdata);
3795
3818void
3819rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3820 void *userdata);
3821
3845void
3846rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3847 void *userdata);
3848
3874int
3875rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3876
3910
3931
3952
3991
4018
4096
4098typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4099 enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4100
4119 enum rte_eth_event_type event,
4120 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4121
4141 enum rte_eth_event_type event,
4142 rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4143
4165int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4166
4187int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4188
4206int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4207
4229int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4230 int epfd, int op, void *data);
4231
4246int
4247rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4248
4262int rte_eth_led_on(uint16_t port_id);
4263
4277int rte_eth_led_off(uint16_t port_id);
4278
4307__rte_experimental
4308int rte_eth_fec_get_capability(uint16_t port_id,
4309 struct rte_eth_fec_capa *speed_fec_capa,
4310 unsigned int num);
4311
4332__rte_experimental
4333int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4334
4358__rte_experimental
4359int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4360
4375int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4376 struct rte_eth_fc_conf *fc_conf);
4377
4392int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4393 struct rte_eth_fc_conf *fc_conf);
4394
4411 struct rte_eth_pfc_conf *pfc_conf);
4412
4431int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4432 uint32_t pool);
4433
4451__rte_experimental
4453 struct rte_eth_pfc_queue_info *pfc_queue_info);
4454
4478__rte_experimental
4480 struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4481
4496int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4497 struct rte_ether_addr *mac_addr);
4498
4517 struct rte_ether_addr *mac_addr);
4518
4536int rte_eth_dev_rss_reta_update(uint16_t port_id,
4537 struct rte_eth_rss_reta_entry64 *reta_conf,
4538 uint16_t reta_size);
4539
4558int rte_eth_dev_rss_reta_query(uint16_t port_id,
4559 struct rte_eth_rss_reta_entry64 *reta_conf,
4560 uint16_t reta_size);
4561
4581int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4582 uint8_t on);
4583
4602int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4603
4620int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4621 uint32_t tx_rate);
4622
4637int rte_eth_dev_rss_hash_update(uint16_t port_id,
4638 struct rte_eth_rss_conf *rss_conf);
4639
4655int
4657 struct rte_eth_rss_conf *rss_conf);
4658
4671__rte_experimental
4672const char *
4674
4699int
4701 struct rte_eth_udp_tunnel *tunnel_udp);
4702
4722int
4724 struct rte_eth_udp_tunnel *tunnel_udp);
4725
4740int rte_eth_dev_get_dcb_info(uint16_t port_id,
4741 struct rte_eth_dcb_info *dcb_info);
4742
4743struct rte_eth_rxtx_callback;
4744
4770const struct rte_eth_rxtx_callback *
4771rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4772 rte_rx_callback_fn fn, void *user_param);
4773
4800const struct rte_eth_rxtx_callback *
4801rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4802 rte_rx_callback_fn fn, void *user_param);
4803
4829const struct rte_eth_rxtx_callback *
4830rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4831 rte_tx_callback_fn fn, void *user_param);
4832
4866int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4867 const struct rte_eth_rxtx_callback *user_cb);
4868
4902int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4903 const struct rte_eth_rxtx_callback *user_cb);
4904
4924int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4925 struct rte_eth_rxq_info *qinfo);
4926
4946int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4947 struct rte_eth_txq_info *qinfo);
4948
4969__rte_experimental
4971 uint16_t queue_id,
4972 struct rte_eth_recycle_rxq_info *recycle_rxq_info);
4973
4992int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4993 struct rte_eth_burst_mode *mode);
4994
5013int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5014 struct rte_eth_burst_mode *mode);
5015
5036__rte_experimental
5037int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5038 struct rte_power_monitor_cond *pmc);
5039
5058int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5059
5072int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5073
5090int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5091
5108int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5109
5128__rte_experimental
5129int
5131 struct rte_eth_dev_module_info *modinfo);
5132
5152__rte_experimental
5153int
5155 struct rte_dev_eeprom_info *info);
5156
5176int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5177 struct rte_ether_addr *mc_addr_set,
5178 uint32_t nb_mc_addr);
5179
5192int rte_eth_timesync_enable(uint16_t port_id);
5193
5206int rte_eth_timesync_disable(uint16_t port_id);
5207
5227 struct timespec *timestamp, uint32_t flags);
5228
5245 struct timespec *timestamp);
5246
5264int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5265
5281int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5282
5301int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5302
5348__rte_experimental
5349int
5350rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5351
5367int
5368rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5369
5386int
5387rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5388
5406 uint16_t *nb_rx_desc,
5407 uint16_t *nb_tx_desc);
5408
5423int
5424rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5425
5435void *
5436rte_eth_dev_get_sec_ctx(uint16_t port_id);
5437
5453__rte_experimental
5455 struct rte_eth_hairpin_cap *cap);
5456
5466 int pf;
5467 __extension__
5468 union {
5469 int vf;
5470 int sf;
5471 };
5472 uint32_t id_base;
5473 uint32_t id_end;
5474 char name[RTE_DEV_NAME_MAX_LEN];
5475};
5476
5490
5514__rte_experimental
5515int rte_eth_representor_info_get(uint16_t port_id,
5516 struct rte_eth_representor_info *info);
5517
5519#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5520
5522#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5523
5525#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5526
5566int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5567
5569#define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5571#define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5572
5583 uint32_t timeout_ms;
5585 uint16_t max_frags;
5590 uint16_t flags;
5591};
5592
5613__rte_experimental
5615 struct rte_eth_ip_reassembly_params *capa);
5616
5638__rte_experimental
5640 struct rte_eth_ip_reassembly_params *conf);
5641
5671__rte_experimental
5673 const struct rte_eth_ip_reassembly_params *conf);
5674
5682typedef struct {
5689 uint16_t time_spent;
5691 uint16_t nb_frags;
5693
5712__rte_experimental
5713int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5714
5738__rte_experimental
5739int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5740 uint16_t offset, uint16_t num, FILE *file);
5741
5765__rte_experimental
5766int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5767 uint16_t offset, uint16_t num, FILE *file);
5768
5769
5770/* Congestion management */
5771
5782
5804 uint8_t rsvd[8];
5805};
5806
5818 union {
5825 uint16_t rx_queue;
5833 } obj_param;
5834 union {
5848 } mode_param;
5849};
5850
5868__rte_experimental
5869int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5870
5888__rte_experimental
5889int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5890
5907__rte_experimental
5908int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5909
5930__rte_experimental
5931int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5932
5933#include <rte_ethdev_core.h>
5934
5958uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5959 struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5960 void *opaque);
5961
/*
 * Burst-receive up to nb_pkts packets from (port_id, queue_id).
 *
 * Fast path: looks up the per-port fast-path ops table, invokes the
 * driver's rx_pkt_burst() on the raw queue handle, then (if compiled
 * with RTE_ETHDEV_RXTX_CALLBACKS) runs any user-registered Rx callbacks
 * on the received packets.
 *
 * Returns the number of mbufs actually stored into rx_pkts (0 on
 * invalid arguments in debug builds).
 */
6049static inline uint16_t
6050rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6051 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6052{
6053 uint16_t nb_rx;
6054 struct rte_eth_fp_ops *p;
6055 void *qd;
6056
6057#ifdef RTE_ETHDEV_DEBUG_RX
 /* Debug builds only: bound-check ids before indexing the ops table;
  * receive nothing on invalid input rather than corrupting memory. */
6058 if (port_id >= RTE_MAX_ETHPORTS ||
6059 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6060 RTE_ETHDEV_LOG(ERR,
6061 "Invalid port_id=%u or queue_id=%u\n",
6062 port_id, queue_id);
6063 return 0;
6064 }
6065#endif
6066
6067 /* fetch pointer to queue data */
6068 p = &rte_eth_fp_ops[port_id];
6069 qd = p->rxq.data[queue_id];
6070
6071#ifdef RTE_ETHDEV_DEBUG_RX
6072 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6073
 /* Queue not set up (or already released): nothing to receive. */
6074 if (qd == NULL) {
6075 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
6076 queue_id, port_id);
6077 return 0;
6078 }
6079#endif
6080
 /* Invoke the PMD's burst receive on the raw queue handle. */
6081 nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6082
6083#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6084 {
6085 void *cb;
6086
6087 /* rte_memory_order_release memory order was used when the
6088 * call back was inserted into the list.
6089 * Since there is a clear dependency between loading
6090 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6091 * not required.
6092 */
6093 cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6094 rte_memory_order_relaxed);
6095 if (unlikely(cb != NULL))
6096 nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6097 rx_pkts, nb_rx, nb_pkts, cb);
6098 }
6099#endif
6100
 /* Trace point fires on every burst, including empty ones. */
6101 rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
6102 return nb_rx;
6103}
6104
/*
 * Return the number of used descriptors on Rx queue (port_id, queue_id).
 *
 * Returns a non-negative count on success, or a negative errno:
 *  -EINVAL  invalid port/queue id or queue not set up (debug builds),
 *  -ENODEV  invalid port id (debug builds),
 *  -ENOTSUP the driver does not implement rx_queue_count.
 */
6122static inline int
6123rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6124{
6125 struct rte_eth_fp_ops *p;
6126 void *qd;
6127
6128#ifdef RTE_ETHDEV_DEBUG_RX
 /* Debug builds only: bound-check ids before the table lookup. */
6129 if (port_id >= RTE_MAX_ETHPORTS ||
6130 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6131 RTE_ETHDEV_LOG(ERR,
6132 "Invalid port_id=%u or queue_id=%u\n",
6133 port_id, queue_id);
6134 return -EINVAL;
6135 }
6136#endif
6137
6138 /* fetch pointer to queue data */
6139 p = &rte_eth_fp_ops[port_id];
6140 qd = p->rxq.data[queue_id];
6141
6142#ifdef RTE_ETHDEV_DEBUG_RX
6143 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6144 if (qd == NULL)
6145 return -EINVAL;
6146#endif
6147
 /* Optional driver op: not all PMDs provide a queue count. */
6148 if (*p->rx_queue_count == NULL)
6149 return -ENOTSUP;
6150 return (int)(*p->rx_queue_count)(qd);
6151}
6152
/* Rx descriptor states returned by rte_eth_rx_descriptor_status(). */
6156#define RTE_ETH_RX_DESC_AVAIL 0
6157#define RTE_ETH_RX_DESC_DONE 1
6158#define RTE_ETH_RX_DESC_UNAVAIL 2
/*
 * Query the state of the Rx descriptor at the given offset within
 * queue (port_id, queue_id).
 *
 * Returns one of the RTE_ETH_RX_DESC_* values above, or a negative
 * errno: -EINVAL (bad ids, debug builds), -ENODEV (invalid port or
 * queue not set up, debug builds), -ENOTSUP (driver does not implement
 * rx_descriptor_status).
 */
6194static inline int
6195rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6196 uint16_t offset)
6197{
6198 struct rte_eth_fp_ops *p;
6199 void *qd;
6200
6201#ifdef RTE_ETHDEV_DEBUG_RX
 /* Debug builds only: bound-check ids before the table lookup. */
6202 if (port_id >= RTE_MAX_ETHPORTS ||
6203 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6204 RTE_ETHDEV_LOG(ERR,
6205 "Invalid port_id=%u or queue_id=%u\n",
6206 port_id, queue_id);
6207 return -EINVAL;
6208 }
6209#endif
6210
6211 /* fetch pointer to queue data */
6212 p = &rte_eth_fp_ops[port_id];
6213 qd = p->rxq.data[queue_id];
6214
6215#ifdef RTE_ETHDEV_DEBUG_RX
6216 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6217 if (qd == NULL)
6218 return -ENODEV;
6219#endif
 /* Optional driver op. */
6220 if (*p->rx_descriptor_status == NULL)
6221 return -ENOTSUP;
6222 return (*p->rx_descriptor_status)(qd, offset);
6223}
6224
/* Tx descriptor states returned by rte_eth_tx_descriptor_status(). */
6228#define RTE_ETH_TX_DESC_FULL 0
6229#define RTE_ETH_TX_DESC_DONE 1
6230#define RTE_ETH_TX_DESC_UNAVAIL 2
/*
 * Query the state of the Tx descriptor at the given offset within
 * queue (port_id, queue_id).
 *
 * Returns one of the RTE_ETH_TX_DESC_* values above, or a negative
 * errno: -EINVAL (bad ids, debug builds), -ENODEV (invalid port or
 * queue not set up, debug builds), -ENOTSUP (driver does not implement
 * tx_descriptor_status).
 */
6266static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6267 uint16_t queue_id, uint16_t offset)
6268{
6269 struct rte_eth_fp_ops *p;
6270 void *qd;
6271
6272#ifdef RTE_ETHDEV_DEBUG_TX
 /* Debug builds only: bound-check ids before the table lookup. */
6273 if (port_id >= RTE_MAX_ETHPORTS ||
6274 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6275 RTE_ETHDEV_LOG(ERR,
6276 "Invalid port_id=%u or queue_id=%u\n",
6277 port_id, queue_id);
6278 return -EINVAL;
6279 }
6280#endif
6281
6282 /* fetch pointer to queue data */
6283 p = &rte_eth_fp_ops[port_id];
6284 qd = p->txq.data[queue_id];
6285
6286#ifdef RTE_ETHDEV_DEBUG_TX
6287 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6288 if (qd == NULL)
6289 return -ENODEV;
6290#endif
 /* Optional driver op. */
6291 if (*p->tx_descriptor_status == NULL)
6292 return -ENOTSUP;
6293 return (*p->tx_descriptor_status)(qd, offset);
6294}
6295
6315uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6316 struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6317
/*
 * Burst-transmit up to nb_pkts packets on (port_id, queue_id).
 *
 * Fast path: looks up the per-port fast-path ops table, runs any
 * user-registered Tx callbacks on the packets first (if compiled with
 * RTE_ETHDEV_RXTX_CALLBACKS — note the callbacks run BEFORE the driver
 * burst, the mirror of the Rx path), then invokes the driver's
 * tx_pkt_burst() on the raw queue handle.
 *
 * Returns the number of packets actually accepted for transmission
 * (0 on invalid arguments in debug builds).
 */
6389static inline uint16_t
6390rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6391 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6392{
6393 struct rte_eth_fp_ops *p;
6394 void *qd;
6395
6396#ifdef RTE_ETHDEV_DEBUG_TX
 /* Debug builds only: bound-check ids before the table lookup. */
6397 if (port_id >= RTE_MAX_ETHPORTS ||
6398 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6399 RTE_ETHDEV_LOG(ERR,
6400 "Invalid port_id=%u or queue_id=%u\n",
6401 port_id, queue_id);
6402 return 0;
6403 }
6404#endif
6405
6406 /* fetch pointer to queue data */
6407 p = &rte_eth_fp_ops[port_id];
6408 qd = p->txq.data[queue_id];
6409
6410#ifdef RTE_ETHDEV_DEBUG_TX
6411 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6412
 /* Queue not set up (or already released): nothing to send. */
6413 if (qd == NULL) {
6414 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6415 queue_id, port_id);
6416 return 0;
6417 }
6418#endif
6419
6420#ifdef RTE_ETHDEV_RXTX_CALLBACKS
6421 {
6422 void *cb;
6423
6424 /* rte_memory_order_release memory order was used when the
6425 * call back was inserted into the list.
6426 * Since there is a clear dependency between loading
6427 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6428 * not required.
6429 */
6430 cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6431 rte_memory_order_relaxed);
6432 if (unlikely(cb != NULL))
6433 nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6434 tx_pkts, nb_pkts, cb);
6435 }
6436#endif
6437
 /* Invoke the PMD's burst transmit on the raw queue handle. */
6438 nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6439
6440 rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6441 return nb_pkts;
6442}
6443
6497#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6498
/*
 * Prepare up to nb_pkts packets for transmission on (port_id, queue_id)
 * by invoking the driver's tx_pkt_prepare() hook, if it provides one.
 *
 * Returns the number of packets successfully prepared. On invalid
 * arguments (debug builds) returns 0 and sets rte_errno to ENODEV
 * (bad port) or EINVAL (bad queue). If the driver has no prepare hook,
 * all nb_pkts are considered ready and nb_pkts is returned unchanged.
 */
6499static inline uint16_t
6500rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6501 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6502{
6503 struct rte_eth_fp_ops *p;
6504 void *qd;
6505
6506#ifdef RTE_ETHDEV_DEBUG_TX
 /* Debug builds only: bound-check ids; report failure via rte_errno. */
6507 if (port_id >= RTE_MAX_ETHPORTS ||
6508 queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6509 RTE_ETHDEV_LOG(ERR,
6510 "Invalid port_id=%u or queue_id=%u\n",
6511 port_id, queue_id);
6512 rte_errno = ENODEV;
6513 return 0;
6514 }
6515#endif
6516
6517 /* fetch pointer to queue data */
6518 p = &rte_eth_fp_ops[port_id];
6519 qd = p->txq.data[queue_id];
6520
6521#ifdef RTE_ETHDEV_DEBUG_TX
6522 if (!rte_eth_dev_is_valid_port(port_id)) {
6523 RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6524 rte_errno = ENODEV;
6525 return 0;
6526 }
6527 if (qd == NULL) {
6528 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6529 queue_id, port_id);
6530 rte_errno = EINVAL;
6531 return 0;
6532 }
6533#endif
6534
 /* Prepare hook is optional; without one, no preparation is needed. */
6535 if (!p->tx_pkt_prepare)
6536 return nb_pkts;
6537
6538 return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6539}
6540
6541#else
6542
6543/*
6544 * Native no-op variant for compilation targets that require no
6545 * preparation steps, where a functional no-op call would introduce
6546 * unnecessary performance overhead.
6547 *
6548 * Generally it is not a good idea to turn this on globally, and it
6549 * should not be used if the behavior of tx preparation can change.
6550 */
6551
6552static inline uint16_t
6553rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6554 __rte_unused uint16_t queue_id,
6555 __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6556{
6557 return nb_pkts;
6558}
6559
6560#endif
6561
/*
 * Transmit all packets currently held in the Tx buffer on
 * (port_id, queue_id).
 *
 * The buffer's length is reset to 0 before the error callback runs,
 * so the buffer may be reused immediately. Packets the PMD did not
 * accept are handed to buffer->error_callback along with
 * buffer->error_userdata. Returns the number of packets actually sent.
 */
6584static inline uint16_t
6585rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6586 struct rte_eth_dev_tx_buffer *buffer)
6587{
6588 uint16_t sent;
6589 uint16_t to_send = buffer->length;
6590
 /* Empty buffer: nothing to do. */
6591 if (to_send == 0)
6592 return 0;
6593
6594 sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6595
6596 buffer->length = 0;
6597
6598 /* All packets sent, or to be dealt with by callback below */
6599 if (unlikely(sent != to_send))
6600 buffer->error_callback(&buffer->pkts[sent],
6601 (uint16_t)(to_send - sent),
6602 buffer->error_userdata);
6603
6604 return sent;
6605}
6606
/*
 * Append one packet to the Tx buffer; when the buffer reaches its
 * configured size, flush it via rte_eth_tx_buffer_flush().
 *
 * Returns 0 while packets are only being accumulated, otherwise the
 * number of packets the flush actually sent.
 */
6637static __rte_always_inline uint16_t
6638rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6639 struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6640{
6641 buffer->pkts[buffer->length++] = tx_pkt;
6642 if (buffer->length < buffer->size)
6643 return 0;
6644
 /* Buffer full: transmit everything accumulated so far. */
6645 return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6646}
6647
/*
 * Recycle mbufs that a Tx queue has finished with directly into an Rx
 * queue's mbuf ring, bypassing the mempool.
 *
 * Both the Tx side (recycle_tx_mbufs_reuse) and the Rx side
 * (recycle_rx_descriptors_refill) driver hooks are optional; if either
 * is missing, or the queues are invalid (debug builds), or no mbufs
 * are available to recycle, the function returns 0 and has no effect.
 * Otherwise it returns the number of mbufs moved from the Tx ring into
 * the Rx ring, after which the Rx descriptors are replenished.
 */
6701__rte_experimental
6702static inline uint16_t
6703rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6704 uint16_t tx_port_id, uint16_t tx_queue_id,
6705 struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6706{
6707 struct rte_eth_fp_ops *p1, *p2;
6708 void *qd1, *qd2;
6709 uint16_t nb_mbufs;
6710
6711#ifdef RTE_ETHDEV_DEBUG_TX
 /* Debug builds only: bound-check the Tx side ids. */
6712 if (tx_port_id >= RTE_MAX_ETHPORTS ||
6713 tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6714 RTE_ETHDEV_LOG(ERR,
6715 "Invalid tx_port_id=%u or tx_queue_id=%u\n",
6716 tx_port_id, tx_queue_id);
6717 return 0;
6718 }
6719#endif
6720
6721 /* fetch pointer to Tx queue data */
6722 p1 = &rte_eth_fp_ops[tx_port_id];
6723 qd1 = p1->txq.data[tx_queue_id];
6724
6725#ifdef RTE_ETHDEV_DEBUG_TX
6726 RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6727
6728 if (qd1 == NULL) {
6729 RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6730 tx_queue_id, tx_port_id);
6731 return 0;
6732 }
6733#endif
 /* Tx driver must support mbuf reuse for recycling to work. */
6734 if (p1->recycle_tx_mbufs_reuse == NULL)
6735 return 0;
6736
6737#ifdef RTE_ETHDEV_DEBUG_RX
 /* Debug builds only: bound-check the Rx side ids. */
6738 if (rx_port_id >= RTE_MAX_ETHPORTS ||
6739 rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6740 RTE_ETHDEV_LOG(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u\n",
6741 rx_port_id, rx_queue_id);
6742 return 0;
6743 }
6744#endif
6745
6746 /* fetch pointer to Rx queue data */
6747 p2 = &rte_eth_fp_ops[rx_port_id];
6748 qd2 = p2->rxq.data[rx_queue_id];
6749
6750#ifdef RTE_ETHDEV_DEBUG_RX
6751 RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6752
6753 if (qd2 == NULL) {
6754 RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
6755 rx_queue_id, rx_port_id);
6756 return 0;
6757 }
6758#endif
 /* Rx driver must support descriptor refill for recycling to work. */
6759 if (p2->recycle_rx_descriptors_refill == NULL)
6760 return 0;
6761
6762 /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6763 * into Rx mbuf ring.
6764 */
6765 nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6766
6767 /* If no recycling mbufs, return 0. */
6768 if (nb_mbufs == 0)
6769 return 0;
6770
6771 /* Replenish the Rx descriptors with the recycling
6772 * into Rx mbuf ring.
6773 */
6774 p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6775
6776 return nb_mbufs;
6777}
6778
6807__rte_experimental
6808int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6809
6810#ifdef __cplusplus
6811}
6812#endif
6813
6814#endif /* _RTE_ETHDEV_H_ */
#define RTE_BIT32(nr)
Definition rte_bitops.h:40
#define unlikely(x)
rte_cman_mode
Definition rte_cman.h:20
#define __rte_cache_min_aligned
Definition rte_common.h:528
#define __rte_unused
Definition rte_common.h:143
#define __rte_always_inline
Definition rte_common.h:331
#define rte_errno
Definition rte_errno.h:29
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
rte_eth_nb_pools
Definition rte_ethdev.h:911
@ RTE_ETH_64_POOLS
Definition rte_ethdev.h:915
@ RTE_ETH_32_POOLS
Definition rte_ethdev.h:914
@ RTE_ETH_8_POOLS
Definition rte_ethdev.h:912
@ RTE_ETH_16_POOLS
Definition rte_ethdev.h:913
rte_eth_event_ipsec_subtype
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_END
@ RTE_ETH_EVENT_IPSEC_UNKNOWN
@ RTE_ETH_EVENT_IPSEC_MAX
@ RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
@ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_START
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
@ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
@ RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition rte_ethdev.h:665
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
rte_eth_event_macsec_subtype
@ RTE_ETH_SUBEVENT_MACSEC_UNKNOWN
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
rte_eth_event_type
@ RTE_ETH_EVENT_RECOVERY_FAILED
@ RTE_ETH_EVENT_UNKNOWN
@ RTE_ETH_EVENT_VF_MBOX
@ RTE_ETH_EVENT_IPSEC
@ RTE_ETH_EVENT_INTR_RESET
@ RTE_ETH_EVENT_INTR_RMV
@ RTE_ETH_EVENT_ERR_RECOVERING
@ RTE_ETH_EVENT_MACSEC
@ RTE_ETH_EVENT_RECOVERY_SUCCESS
@ RTE_ETH_EVENT_DESTROY
@ RTE_ETH_EVENT_FLOW_AGED
@ RTE_ETH_EVENT_QUEUE_STATE
@ RTE_ETH_EVENT_INTR_LSC
@ RTE_ETH_EVENT_MAX
@ RTE_ETH_EVENT_RX_AVAIL_THRESH
@ RTE_ETH_EVENT_NEW
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_is_valid_port(uint16_t port_id)
rte_eth_cman_obj
@ RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL
@ RTE_ETH_CMAN_OBJ_RX_QUEUE
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition rte_ethdev.h:841
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_reset(uint16_t port_id)
#define RTE_ETH_BURST_MODE_INFO_SIZE
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
rte_eth_dev_state
@ RTE_ETH_DEV_ATTACHED
@ RTE_ETH_DEV_UNUSED
@ RTE_ETH_DEV_REMOVED
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
rte_eth_fec_mode
@ RTE_ETH_FEC_NOFEC
@ RTE_ETH_FEC_BASER
@ RTE_ETH_FEC_AUTO
@ RTE_ETH_FEC_RS
@ RTE_ETH_FEC_LLRS
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
rte_eth_err_handle_mode
@ RTE_ETH_ERROR_HANDLE_MODE_PASSIVE
@ RTE_ETH_ERROR_HANDLE_MODE_NONE
@ RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
rte_eth_tx_mq_mode
Definition rte_ethdev.h:407
@ RTE_ETH_MQ_TX_DCB
Definition rte_ethdev.h:409
@ RTE_ETH_MQ_TX_VMDQ_DCB
Definition rte_ethdev.h:410
@ RTE_ETH_MQ_TX_VMDQ_ONLY
Definition rte_ethdev.h:411
@ RTE_ETH_MQ_TX_NONE
Definition rte_ethdev.h:408
int rte_eth_promiscuous_get(uint16_t port_id)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t rte_eth_find_next(uint16_t port_id)
rte_eth_rx_mq_mode
Definition rte_ethdev.h:381
@ RTE_ETH_MQ_RX_DCB_RSS
Definition rte_ethdev.h:390
@ RTE_ETH_MQ_RX_VMDQ_DCB_RSS
Definition rte_ethdev.h:399
@ RTE_ETH_MQ_RX_DCB
Definition rte_ethdev.h:388
@ RTE_ETH_MQ_RX_VMDQ_DCB
Definition rte_ethdev.h:397
@ RTE_ETH_MQ_RX_VMDQ_RSS
Definition rte_ethdev.h:395
@ RTE_ETH_MQ_RX_NONE
Definition rte_ethdev.h:383
@ RTE_ETH_MQ_RX_RSS
Definition rte_ethdev.h:386
@ RTE_ETH_MQ_RX_VMDQ_ONLY
Definition rte_ethdev.h:393
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_promiscuous_enable(uint16_t port_id)
rte_eth_representor_type
@ RTE_ETH_REPRESENTOR_PF
@ RTE_ETH_REPRESENTOR_VF
@ RTE_ETH_REPRESENTOR_SF
@ RTE_ETH_REPRESENTOR_NONE
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition rte_ethdev.h:840
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_eth_tunnel_type
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition rte_ethdev.h:373
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
rte_eth_fc_mode
@ RTE_ETH_FC_TX_PAUSE
@ RTE_ETH_FC_RX_PAUSE
@ RTE_ETH_FC_NONE
@ RTE_ETH_FC_FULL
rte_eth_event_macsec_type
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
int rte_eth_led_on(uint16_t port_id)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition rte_ethdev.h:372
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition rte_ethdev.h:374
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_xstats_reset(uint16_t port_id)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition rte_ethdev.h:438
@ RTE_ETH_VLAN_TYPE_OUTER
Definition rte_ethdev.h:441
@ RTE_ETH_VLAN_TYPE_INNER
Definition rte_ethdev.h:440
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
rte_eth_hash_function
Definition rte_ethdev.h:456
@ RTE_ETH_HASH_FUNCTION_DEFAULT
Definition rte_ethdev.h:458
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT
Definition rte_ethdev.h:473
@ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR
Definition rte_ethdev.h:460
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ
Definition rte_ethdev.h:466
@ RTE_ETH_HASH_FUNCTION_TOEPLITZ
Definition rte_ethdev.h:459
uint16_t rte_eth_dev_count_total(void)
#define RTE_ETH_XSTATS_NAME_SIZE
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition rte_ethdev.h:902
@ RTE_ETH_4_TCS
Definition rte_ethdev.h:903
@ RTE_ETH_8_TCS
Definition rte_ethdev.h:904
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
uint8_t rsvd_mode_params[4]
enum rte_eth_cman_obj obj
struct rte_cman_red_params red
uint8_t rsvd_obj_params[4]
enum rte_cman_mode mode
uint64_t modes_supported
uint64_t objs_supported
struct rte_eth_intr_conf intr_conf
struct rte_eth_txmode txmode
struct rte_eth_rxmode rxmode
struct rte_eth_conf::@126 rx_adv_conf
uint32_t lpbk_mode
uint32_t dcb_capability_en
union rte_eth_conf::@127 tx_adv_conf
uint32_t link_speeds
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
struct rte_eth_dcb_tc_queue_mapping tc_queue
struct rte_eth_dcb_tc_queue_mapping::@129 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@128 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
uint16_t nb_seg_max
uint32_t max_rx_bufsize
uint32_t max_hash_mac_addrs
struct rte_eth_desc_lim rx_desc_lim
unsigned int if_index
uint16_t max_rx_queues
uint16_t vmdq_queue_num
uint32_t min_rx_bufsize
uint16_t max_tx_queues
struct rte_eth_txconf default_txconf
uint16_t max_vmdq_pools
struct rte_device * device
struct rte_eth_rxconf default_rxconf
uint16_t nb_tx_queues
enum rte_eth_err_handle_mode err_handle_mode
uint32_t max_rx_pktlen
uint32_t max_lro_pkt_size
uint16_t vmdq_queue_base
void * reserved_ptrs[2]
uint64_t reserved_64s[2]
uint64_t tx_queue_offload_capa
uint16_t vmdq_pool_base
struct rte_eth_desc_lim tx_desc_lim
uint64_t flow_type_rss_offloads
uint16_t max_rx_mempools
struct rte_eth_dev_portconf default_txportconf
uint64_t tx_offload_capa
const char * driver_name
uint8_t hash_key_size
uint32_t speed_capa
struct rte_eth_dev_portconf default_rxportconf
struct rte_eth_switch_info switch_info
struct rte_eth_rxseg_capa rx_seg_capa
uint64_t rx_queue_offload_capa
uint64_t rx_offload_capa
uint16_t nb_rx_queues
uint32_t max_mac_addrs
const uint32_t * dev_flags
struct rte_mbuf * pkts[]
enum rte_eth_event_ipsec_subtype subtype
enum rte_eth_event_macsec_type type
enum rte_eth_event_macsec_subtype subtype
uint32_t low_water
uint16_t send_xon
enum rte_eth_fc_mode mode
uint32_t high_water
uint16_t pause_time
uint8_t mac_ctrl_frame_fwd
struct rte_eth_hairpin_queue_cap tx_cap
struct rte_eth_hairpin_queue_cap rx_cap
uint32_t use_locked_device_memory
struct rte_eth_fc_conf fc
enum rte_eth_fc_mode mode
enum rte_eth_fc_mode mode_capa
struct rte_mempool * mp
struct rte_mbuf ** mbuf_ring
struct rte_eth_representor_range ranges[]
enum rte_eth_representor_type type
char name[RTE_DEV_NAME_MAX_LEN]
uint8_t * rss_key
Definition rte_ethdev.h:497
uint8_t rss_key_len
Definition rte_ethdev.h:498
enum rte_eth_hash_function algorithm
Definition rte_ethdev.h:504
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition rte_ethdev.h:895
struct rte_eth_thresh rx_thresh
uint64_t offloads
void * reserved_ptrs[2]
uint64_t reserved_64s[2]
uint8_t rx_deferred_start
uint16_t share_group
uint8_t rx_drop_en
uint16_t share_qid
union rte_eth_rxseg * rx_seg
struct rte_mempool ** rx_mempools
uint16_t rx_nseg
uint16_t rx_free_thresh
uint32_t max_lro_pkt_size
Definition rte_ethdev.h:422
uint64_t offloads
Definition rte_ethdev.h:428
void * reserved_ptrs[2]
Definition rte_ethdev.h:431
uint64_t reserved_64s[2]
Definition rte_ethdev.h:430
enum rte_eth_rx_mq_mode mq_mode
Definition rte_ethdev.h:419
struct rte_eth_rxconf conf
struct rte_mempool * mp
uint16_t rx_buf_size
__extension__ uint32_t multi_pools
uint32_t offset_allowed
uint32_t offset_align_log2
struct rte_mempool * mp
uint64_t imissed
Definition rte_ethdev.h:270
uint64_t obytes
Definition rte_ethdev.h:265
uint64_t opackets
Definition rte_ethdev.h:263
uint64_t rx_nombuf
Definition rte_ethdev.h:273
uint64_t ibytes
Definition rte_ethdev.h:264
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition rte_ethdev.h:280
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition rte_ethdev.h:278
uint64_t ierrors
Definition rte_ethdev.h:271
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition rte_ethdev.h:284
uint64_t ipackets
Definition rte_ethdev.h:262
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition rte_ethdev.h:276
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition rte_ethdev.h:282
uint64_t oerrors
Definition rte_ethdev.h:272
const char * name
uint8_t hthresh
Definition rte_ethdev.h:365
uint8_t pthresh
Definition rte_ethdev.h:364
uint8_t wthresh
Definition rte_ethdev.h:366
uint8_t tx_deferred_start
uint64_t offloads
void * reserved_ptrs[2]
uint64_t reserved_64s[2]
struct rte_eth_thresh tx_thresh
uint16_t tx_rs_thresh
uint16_t tx_free_thresh
uint64_t offloads
__extension__ uint8_t hw_vlan_insert_pvid
void * reserved_ptrs[2]
__extension__ uint8_t hw_vlan_reject_tagged
uint64_t reserved_64s[2]
__extension__ uint8_t hw_vlan_reject_untagged
enum rte_eth_tx_mq_mode mq_mode
struct rte_eth_txconf conf
enum rte_eth_nb_pools nb_queue_pools
Definition rte_ethdev.h:953
struct rte_eth_vmdq_dcb_conf::@122 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition rte_ethdev.h:962
enum rte_eth_nb_pools nb_queue_pools
Definition rte_ethdev.h:984
struct rte_eth_vmdq_rx_conf::@123 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition rte_ethdev.h:985
char name[RTE_ETH_XSTATS_NAME_SIZE]
uint64_t value