/linux-6.14.4/include/net/ |
D | xdp.h |
     2: /* include/net/xdp.h
    17:  * DOC: XDP RX-queue information
    19:  * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
    24:  * reference to this xdp_rxq_info structure. This provides the XDP
    34:  * The struct is not directly tied to the XDP prog. A new XDP prog
    45: MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
    51: /* XDP flags for ndo_xdp_xmit */
    75: XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */
    76: XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
    92: static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)    [in xdp_buff_has_frags(), argument]
   [all …]
|
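The DOC excerpt above describes how each driver RX queue carries an xdp_rxq_info and a registered memory model. As a rough, hedged illustration of how a driver typically wires this up (not code from this tree; every my_* identifier is a placeholder), the registration path looks roughly like:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Illustrative driver state; all "my_" names are placeholders. */
struct my_rx_ring {
	struct net_device *netdev;
	struct xdp_rxq_info xdp_rxq;
	u32 queue_index;
	unsigned int napi_id;
};

/* Register one RX queue with the XDP core and declare which memory model
 * backs its buffers. MEM_TYPE_PAGE_ORDER0 (seen in the listing above) means
 * plain order-0 pages and needs no allocator argument.
 */
static int my_setup_xdp_rxq(struct my_rx_ring *ring)
{
	int err;

	err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
			       ring->queue_index, ring->napi_id);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err)
		xdp_rxq_info_unreg(&ring->xdp_rxq);

	return err;
}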
D | xdp_sock_drv.h |
    76: static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)    [in xsk_buff_xdp_get_dma(), argument]
    78: struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);    [in xsk_buff_xdp_get_dma()]
    83: static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)    [in xsk_buff_xdp_get_frame_dma(), argument]
    85: struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);    [in xsk_buff_xdp_get_frame_dma()]
   101: static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)    [in xsk_buff_alloc_batch(), argument]
   103: return xp_alloc_batch(pool, xdp, max);    [in xsk_buff_alloc_batch()]
   111: static inline void xsk_buff_free(struct xdp_buff *xdp)    [in xsk_buff_free(), argument]
   113: struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);    [in xsk_buff_free()]
   117: if (likely(!xdp_buff_has_frags(xdp)))    [in xsk_buff_free()]
   125: xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;    [in xsk_buff_free()]
   [all …]
|
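The inline helpers above (xsk_buff_alloc_batch(), xsk_buff_xdp_get_dma(), xsk_buff_free()) are the building blocks of a zero-copy RX refill loop; the igb_xsk.c and ice_xsk.c entries further down use exactly this pattern. A simplified, driver-agnostic sketch (descriptor programming is only hinted at, the my_* name is a placeholder):

#include <net/xdp_sock_drv.h>

/* Refill up to "count" RX slots straight from an AF_XDP buffer pool.
 * Returns how many buffers were actually obtained.
 */
static u32 my_fill_rx_from_pool(struct xsk_buff_pool *pool,
				struct xdp_buff **xdp, u32 count)
{
	u32 buffs, i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(xdp[i]);

		/* a real driver writes "dma" into RX descriptor i here; on
		 * teardown each buffer goes back via xsk_buff_free()
		 */
		(void)dma;
	}

	return buffs;	/* may be less than count if the fill ring ran dry */
}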
/linux-6.14.4/Documentation/bpf/ |
D | redirect.rst |
    25: :doc: xdp redirect
    29: those that do, not all of them support non-linear frames. Non-linear xdp
    41: The following bpftrace command can be used to capture and count all XDP tracepoints:
    45: sudo bpftrace -e 'tracepoint:xdp:* { @cnt[probe] = count(); }'
    49: @cnt[tracepoint:xdp:mem_connect]: 18
    50: @cnt[tracepoint:xdp:mem_disconnect]: 18
    51: @cnt[tracepoint:xdp:xdp_exception]: 19605
    52: @cnt[tracepoint:xdp:xdp_devmap_xmit]: 1393604
    53: @cnt[tracepoint:xdp:xdp_redirect]: 22292200
    56: The various xdp tracepoints can be found in ``source/include/trace/events/xdp.h``
   [all …]
|
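redirect.rst covers which drivers can be redirect targets and how the xdp tracepoints expose failures. For orientation, a minimal devmap-based redirect program looks roughly like the sketch below (map name and sizing are arbitrary, not from this tree); failed redirects would show up in the xdp_redirect_err and xdp_exception counts of the bpftrace one-liner quoted above.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* illustrative devmap: slot 0 is assumed to hold the egress ifindex */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_devmap(struct xdp_md *ctx)
{
	/* last argument: action returned if the map lookup fails */
	return bpf_redirect_map(&tx_port, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";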
/linux-6.14.4/tools/testing/selftests/bpf/progs/ |
D | xdp_features.c |
    65: xdp_process_echo_packet(struct xdp_md *xdp, bool dut)    [in xdp_process_echo_packet(), argument]
    67: void *data_end = (void *)(long)xdp->data_end;    [in xdp_process_echo_packet()]
    68: void *data = (void *)(long)xdp->data;    [in xdp_process_echo_packet()]
   135: xdp_update_stats(struct xdp_md *xdp, bool tx, bool dut)    [in xdp_update_stats(), argument]
   139: if (xdp_process_echo_packet(xdp, tx))    [in xdp_update_stats()]
   155: SEC("xdp")
   156: int xdp_tester_check_tx(struct xdp_md *xdp)    [in xdp_tester_check_tx(), argument]
   158: xdp_update_stats(xdp, true, false);    [in xdp_tester_check_tx()]
   163: SEC("xdp")
   164: int xdp_tester_check_rx(struct xdp_md *xdp)    [in xdp_tester_check_rx(), argument]
   [all …]
|
D | verifier_xdp_direct_packet_access.c |
     8: SEC("xdp")
     9: __description("XDP pkt read, pkt_end mangling, bad access 1")
    29: SEC("xdp")
    30: __description("XDP pkt read, pkt_end mangling, bad access 2")
    50: SEC("xdp")
    51: __description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
    70: SEC("xdp")
    71: __description("XDP pkt read, pkt_data' > pkt_end, bad access 1")
    91: SEC("xdp")
    92: __description("XDP pkt read, pkt_data' > pkt_end, bad access 2")
   [all …]
|
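The test names above encode which pointer comparisons against pkt_end the verifier accepts before a packet load. Outside the test suite, the canonical bounds-check pattern an XDP program has to follow before touching packet bytes is roughly (a toy filter, not taken from the selftests):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_parse_eth(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;

	/* the verifier only permits eth->h_proto below because of this check */
	if ((void *)(eth + 1) > data_end)
		return XDP_DROP;

	/* toy policy: only IPv4 frames are passed up the stack */
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";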
D | test_xdp_do_redirect.c |
    11:  * @MARK_IN: frame is being processed by the input XDP prog.
    28: SEC("xdp")
    29: int xdp_redirect(struct xdp_md *xdp)    [in xdp_redirect(), argument]
    31: __u32 *metadata = (void *)(long)xdp->data_meta;    [in xdp_redirect()]
    32: void *data_end = (void *)(long)xdp->data_end;    [in xdp_redirect()]
    33: void *data = (void *)(long)xdp->data;    [in xdp_redirect()]
    41: if (xdp->ingress_ifindex != (__u32)ifindex_in)    [in xdp_redirect()]
    55: if (bpf_xdp_adjust_meta(xdp, sizeof(__u64)))    [in xdp_redirect()]
    85: SEC("xdp")
    86: int xdp_count_pkts(struct xdp_md *xdp)    [in xdp_count_pkts(), argument]
   [all …]
|
D | xsk_xdp_progs.c |
    19: SEC("xdp.frags") int xsk_def_prog(struct xdp_md *xdp)    [in xsk_def_prog(), argument]
    24: SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp)    [in xsk_xdp_drop(), argument]
    33: SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)    [in xsk_xdp_populate_metadata(), argument]
    40: err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info));    [in xsk_xdp_populate_metadata()]
    44: data = (void *)(long)xdp->data;    [in xsk_xdp_populate_metadata()]
    45: data_meta = (void *)(long)xdp->data_meta;    [in xsk_xdp_populate_metadata()]
    56: SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)    [in xsk_xdp_shared_umem(), argument]
    58: void *data = (void *)(long)xdp->data;    [in xsk_xdp_shared_umem()]
    59: void *data_end = (void *)(long)xdp->data_end;    [in xsk_xdp_shared_umem()]
|
D | test_xdp.c |
    79: static __always_inline int handle_ipv4(struct xdp_md *xdp)    [in handle_ipv4(), argument]
    81: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
    82: void *data = (void *)(long)xdp->data;    [in handle_ipv4()]
   112: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))    [in handle_ipv4()]
   115: data = (void *)(long)xdp->data;    [in handle_ipv4()]
   116: data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
   152: static __always_inline int handle_ipv6(struct xdp_md *xdp)    [in handle_ipv6(), argument]
   154: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv6()]
   155: void *data = (void *)(long)xdp->data;    [in handle_ipv6()]
   182: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))    [in handle_ipv6()]
   [all …]
|
D | test_xdp_loop.c |
    75: static __always_inline int handle_ipv4(struct xdp_md *xdp)    [in handle_ipv4(), argument]
    77: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
    78: void *data = (void *)(long)xdp->data;    [in handle_ipv4()]
   108: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))    [in handle_ipv4()]
   111: data = (void *)(long)xdp->data;    [in handle_ipv4()]
   112: data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
   148: static __always_inline int handle_ipv6(struct xdp_md *xdp)    [in handle_ipv6(), argument]
   150: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv6()]
   151: void *data = (void *)(long)xdp->data;    [in handle_ipv6()]
   178: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))    [in handle_ipv6()]
   [all …]
|
/linux-6.14.4/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_xdp.c |
    28: struct xdp_buff *xdp)    [in bnxt_xmit_bd(), argument]
    38: if (xdp && xdp_buff_has_frags(xdp)) {    [in bnxt_xmit_bd()]
    39: sinfo = xdp_get_shared_info_from_buff(xdp);    [in bnxt_xmit_bd()]
    47: if (xdp)    [in bnxt_xmit_bd()]
    48: tx_buf->page = virt_to_head_page(xdp->data);    [in bnxt_xmit_bd()]
    96: struct xdp_buff *xdp)    [in __bnxt_xmit_xdp(), argument]
   100: tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);    [in __bnxt_xmit_xdp()]
   183: struct xdp_buff *xdp)    [in bnxt_xdp_buff_init(), argument]
   198: xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);    [in bnxt_xdp_buff_init()]
   199: xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);    [in bnxt_xdp_buff_init()]
   [all …]
|
/linux-6.14.4/net/core/ |
D | xdp.c |
     2: /* net/core/xdp.c
    20: #include <net/xdp.h>
    22: #include <trace/events/xdp.h>
   371:  * xdp_reg_page_pool - register &page_pool as a memory provider for XDP
   374:  * Can be used to register pools manually without connecting to any XDP RxQ
   375:  * info, so that the XDP layer will be aware of them. Then, they can be
   408:  * @xdp_rxq: XDP RxQ info to attach the pool to
   426: /* XDP RX runs under NAPI protection, and in different delivery error
   433: bool napi_direct, struct xdp_buff *xdp)    [in __xdp_return(), argument]
   454: xsk_buff_free(xdp);    [in __xdp_return()]
   [all …]
|
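The kernel-doc fragments above mention registering a page_pool as an XDP memory provider. A hedged sketch of the long-standing variant of that idea, creating a pool and attaching it to an RxQ with MEM_TYPE_PAGE_POOL; the sizes, flags and surrounding driver state are assumptions for illustration, not taken from net/core/xdp.c:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Rough sketch: one page_pool per RX ring, announced to the XDP core so
 * frames built from it can be recycled when they are returned.
 */
static struct page_pool *my_create_rx_page_pool(struct device *dev,
						struct xdp_rxq_info *xdp_rxq)
{
	struct page_pool_params pp = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP,
		.pool_size	= 256,			/* arbitrary ring depth */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool *pool = page_pool_create(&pp);

	if (IS_ERR(pool))
		return pool;

	/* attach the pool as this RxQ's memory model */
	if (xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_POOL, pool)) {
		page_pool_destroy(pool);
		return ERR_PTR(-EINVAL);
	}

	return pool;
}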
/linux-6.14.4/Documentation/networking/ |
D | xdp-rx-metadata.rst |
     4: XDP RX Metadata
     7: This document describes how an eXpress Data Path (XDP) program can access
    14: XDP has access to a set of kfuncs to manipulate the metadata in an XDP frame.
    16: implement these kfuncs. The set of kfuncs is declared in ``include/net/xdp.h``
    22: .. kernel-doc:: net/core/xdp.c
    25: .. kernel-doc:: net/core/xdp.c
    28: .. kernel-doc:: net/core/xdp.c
    31: An XDP program can use these kfuncs to read the metadata into stack
    33: consumers, an XDP program can store it into the metadata area carried
    42: Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
   [all …]
|
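xdp-rx-metadata.rst describes kfuncs that let an XDP program read RX hints from the descriptor and stash them in the metadata area in front of the packet for AF_XDP consumers, much like xsk_xdp_populate_metadata() in the selftest entry above. A hedged sketch: the kfunc prototype is assumed from the documentation rather than copied from it, it is only usable on drivers that implement it, and my_rx_meta is an arbitrary program-private layout.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* assumed kfunc prototype, per the RX-metadata documentation */
extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;

struct my_rx_meta {			/* layout private to this program */
	__u64 rx_timestamp;
};

SEC("xdp")
int xdp_store_rx_meta(struct xdp_md *ctx)
{
	struct my_rx_meta *meta;
	void *data, *data_meta;

	/* grow the metadata area that travels in front of the packet */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	meta = data_meta;
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	if (bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp))
		meta->rx_timestamp = 0;	/* not provided by this driver */

	/* a real AF_XDP setup would now bpf_redirect_map() into an XSKMAP */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";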
/linux-6.14.4/drivers/net/ethernet/intel/igb/ |
D | igb_xsk.c |
     6: #include <net/xdp.h>
   176: static u16 igb_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,    [in igb_fill_rx_descs(), argument]
   187: buffs = xsk_buff_alloc_batch(pool, xdp, count);    [in igb_fill_rx_descs()]
   189: dma = xsk_buff_xdp_get_dma(*xdp);    [in igb_fill_rx_descs()]
   194: xdp++;    [in igb_fill_rx_descs()]
   207: struct xdp_buff **xdp;    [in igb_alloc_rx_buffers_zc(), local]
   210: xdp = &rx_ring->rx_buffer_info_zc[ntu];    [in igb_alloc_rx_buffers_zc()]
   213: nb_buffs_extra = igb_fill_rx_descs(xsk_pool, xdp, rx_desc,    [in igb_alloc_rx_buffers_zc()]
   220: xdp = rx_ring->rx_buffer_info_zc;    [in igb_alloc_rx_buffers_zc()]
   225: nb_buffs = igb_fill_rx_descs(xsk_pool, xdp, rx_desc, count);    [in igb_alloc_rx_buffers_zc()]
   [all …]
|
/linux-6.14.4/drivers/net/vmxnet3/ |
D | vmxnet3_xdp.c |
    48: NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",    [in vmxnet3_xdp_set()]
    54: NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");    [in vmxnet3_xdp_set()]
    86: "failed to re-create rx queues for XDP.");    [in vmxnet3_xdp_set()]
    92: "failed to activate device for XDP.");    [in vmxnet3_xdp_set()]
   100: /* This is the main xdp call used by kernel to set/unset eBPF program. */
   152: } else { /* XDP buffer from page pool */    [in vmxnet3_xdp_xmit_frame()]
   261: vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,    [in vmxnet3_run_xdp(), argument]
   270: act = bpf_prog_run_xdp(prog, xdp);    [in vmxnet3_run_xdp()]
   271: page = virt_to_page(xdp->data_hard_start);    [in vmxnet3_run_xdp()]
   277: err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);    [in vmxnet3_run_xdp()]
   [all …]
|
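vmxnet3_run_xdp() above follows the verdict-handling shape that nearly every driver repeats: run the program, then act on XDP_PASS/TX/REDIRECT and funnel everything else through the abort/drop path. Reduced to a skeleton (my_* helpers are placeholders, not vmxnet3 code):

#include <linux/bpf_trace.h>	/* trace_xdp_exception() */
#include <linux/errno.h>
#include <linux/filter.h>	/* bpf_prog_run_xdp(), xdp_do_redirect() */
#include <linux/netdevice.h>
#include <net/xdp.h>

/* placeholder for the driver's own "send this buffer back out" path */
static int my_xmit_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	return -ENOSPC;
}

/* Skeleton of the per-packet XDP hook in a driver RX path. */
static u32 my_run_xdp(struct net_device *dev, struct bpf_prog *prog,
		      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		if (my_xmit_xdp_tx(dev, xdp) < 0)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, xdp, prog) < 0)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(dev, prog, act);
		fallthrough;		/* treat aborts as drops */
	case XDP_DROP:
		act = XDP_DROP;
		break;
	}

	return act;
}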
/linux-6.14.4/drivers/net/ethernet/intel/ice/ |
D | ice_xsk.c |
     6: #include <net/xdp.h>
   340:  * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
   345:  * XDP requires more memory, than rx_buf provides.
   426:  * @xdp: SW ring of xdp_buff that will hold the buffers
   437: static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,    [in ice_fill_rx_descs(), argument]
   444: buffs = xsk_buff_alloc_batch(pool, xdp, count);    [in ice_fill_rx_descs()]
   446: dma = xsk_buff_xdp_get_dma(*xdp);    [in ice_fill_rx_descs()]
   453: ice_xdp_meta_set_desc(*xdp, rx_desc);    [in ice_fill_rx_descs()]
   456: xdp++;    [in ice_fill_rx_descs()]
   481: struct xdp_buff **xdp;    [in __ice_alloc_rx_bufs_zc(), local]
   [all …]
|
/linux-6.14.4/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
     6: #include <net/xdp.h>
    99: struct xdp_buff *xdp)    [in ixgbe_run_xdp_zc(), argument]
   108: act = bpf_prog_run_xdp(xdp_prog, xdp);    [in ixgbe_run_xdp_zc()]
   111: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);    [in ixgbe_run_xdp_zc()]
   125: xdpf = xdp_convert_buff_to_frame(xdp);    [in ixgbe_run_xdp_zc()]
   168: bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);    [in ixgbe_alloc_rx_buffers_zc()]
   169: if (!bi->xdp) {    [in ixgbe_alloc_rx_buffers_zc()]
   174: dma = xsk_buff_xdp_get_dma(bi->xdp);    [in ixgbe_alloc_rx_buffers_zc()]
   214: const struct xdp_buff *xdp)    [in ixgbe_construct_skb_zc(), argument]
   216: unsigned int totalsize = xdp->data_end - xdp->data_meta;    [in ixgbe_construct_skb_zc()]
   [all …]
|
/linux-6.14.4/drivers/net/hyperv/ |
D | netvsc_bpf.c |
    17: #include <net/xdp.h>
    25: struct xdp_buff *xdp)    [in netvsc_run_xdp(), argument]
    35: xdp->data_hard_start = NULL;    [in netvsc_run_xdp()]
    56: xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);    [in netvsc_run_xdp()]
    57: xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);    [in netvsc_run_xdp()]
    59: memcpy(xdp->data, data, len);    [in netvsc_run_xdp()]
    61: act = bpf_prog_run_xdp(prog, xdp);    [in netvsc_run_xdp()]
    73: if (!xdp_do_redirect(ndev, xdp, prog)) {    [in netvsc_run_xdp()]
   107: xdp->data_hard_start = NULL;    [in netvsc_run_xdp()]
   138: netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",    [in netvsc_xdp_set()]
   [all …]
|
/linux-6.14.4/tools/testing/selftests/net/ |
D | veth.sh |
   249: ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp
   250: chk_gro_flag "gro vs xdp while down - gro flag off" $DST off
   253: ip -n $NS_DST link set dev veth$DST xdp off
   254: chk_gro_flag " - after xdp off" $DST off
   257: ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp
   258: chk_gro_flag " - after peer xdp" $DST off
   263: ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp
   265: chk_gro_flag "gro vs xdp while down - gro flag on" $DST on
   268: ip -n $NS_DST link set dev veth$DST xdp off
   269: chk_gro_flag " - after xdp off" $DST on
   [all …]
|
/linux-6.14.4/include/trace/events/ |
D | xdp.h |
     3: #define TRACE_SYSTEM xdp
    12: #include <net/xdp.h>
    32: const struct bpf_prog *xdp, u32 act),
    34: TP_ARGS(dev, xdp, act),
    43: __entry->prog_id = xdp->aux->id;
    93: const struct bpf_prog *xdp,
    98: TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
   124: __entry->prog_id = xdp->aux->id;
   143: const struct bpf_prog *xdp,
   147: TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
   [all …]
|
/linux-6.14.4/drivers/net/ethernet/microchip/lan966x/ |
D | lan966x_xdp.c |
     9: static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)    [in lan966x_xdp_setup(), argument]
    18: NL_SET_ERR_MSG_MOD(xdp->extack,    [in lan966x_xdp_setup()]
    19: "Allow to set xdp only when using fdma");    [in lan966x_xdp_setup()]
    24: old_prog = xchg(&port->xdp_prog, xdp->prog);    [in lan966x_xdp_setup()]
    43: int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)    [in lan966x_xdp(), argument]
    45: switch (xdp->command) {    [in lan966x_xdp()]
    47: return lan966x_xdp_setup(dev, xdp);    [in lan966x_xdp()]
    79: struct xdp_buff xdp;    [in lan966x_xdp_run(), local]
    82: xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,    [in lan966x_xdp_run()]
    84: xdp_prepare_buff(&xdp, page_address(page),    [in lan966x_xdp_run()]
   [all …]
|
/linux-6.14.4/tools/bpf/bpftool/Documentation/ |
D | bpftool-net.rst |
    32: | *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** | **tcx_ingress** | **t…
    39: Currently, device driver xdp attachments, tcx, netkit and old-style tc
    52: The current output will start with all xdp program attachments, followed by
    54: flow_dissector and finally netfilter programs. Both xdp programs and
    64: command used with **overwrite** option. Currently, only XDP-related modes
    68: **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
    69: …**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as …
    70: **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
    71: **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
    78: for attach must be specified. Currently, only XDP-related modes are
   [all …]
|
/linux-6.14.4/samples/bpf/ |
D | xdp_tx_iptunnel_kern.c |
    77: static __always_inline int handle_ipv4(struct xdp_md *xdp)    [in handle_ipv4(), argument]
    79: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
    80: void *data = (void *)(long)xdp->data;    [in handle_ipv4()]
   112: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))    [in handle_ipv4()]
   115: data = (void *)(long)xdp->data;    [in handle_ipv4()]
   116: data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
   152: static __always_inline int handle_ipv6(struct xdp_md *xdp)    [in handle_ipv6(), argument]
   154: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv6()]
   155: void *data = (void *)(long)xdp->data;    [in handle_ipv6()]
   184: if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))    [in handle_ipv6()]
   [all …]
|
D | xdp_adjust_tail_kern.c |
    71: static __always_inline int send_icmp4_too_big(struct xdp_md *xdp)    [in send_icmp4_too_big(), argument]
    75: if (bpf_xdp_adjust_head(xdp, 0 - headroom))    [in send_icmp4_too_big()]
    77: void *data = (void *)(long)xdp->data;    [in send_icmp4_too_big()]
    78: void *data_end = (void *)(long)xdp->data_end;    [in send_icmp4_too_big()]
   121: static __always_inline int handle_ipv4(struct xdp_md *xdp)    [in handle_ipv4(), argument]
   123: void *data_end = (void *)(long)xdp->data_end;    [in handle_ipv4()]
   124: void *data = (void *)(long)xdp->data;    [in handle_ipv4()]
   130: if (bpf_xdp_adjust_tail(xdp, 0 - offset))    [in handle_ipv4()]
   132: return send_icmp4_too_big(xdp);    [in handle_ipv4()]
   138: int _xdp_icmp(struct xdp_md *xdp)    [in _xdp_icmp(), argument]
   [all …]
|
/linux-6.14.4/drivers/net/ethernet/netronome/nfp/nfd3/ |
D | xsk.c |
    35: txbuf->xdp = xrxbuf->xdp;    [in nfp_nfd3_xsk_tx_xdp()]
    87: skb_put_data(skb, xrxbuf->xdp->data, pkt_len);    [in nfp_nfd3_xsk_rx_skb()]
   105: xrxbuf->xdp->data - xrxbuf->xdp->data_meta);    [in nfp_nfd3_xsk_rx_skb()]
   184: xrxbuf->xdp->data += meta_len;    [in nfp_nfd3_xsk_rx()]
   185: xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;    [in nfp_nfd3_xsk_rx()]
   186: xdp_set_data_meta_invalid(xrxbuf->xdp);    [in nfp_nfd3_xsk_rx()]
   187: xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);    [in nfp_nfd3_xsk_rx()]
   188: net_prefetch(xrxbuf->xdp->data);    [in nfp_nfd3_xsk_rx()]
   192: xrxbuf->xdp->data -    [in nfp_nfd3_xsk_rx()]
   194: xrxbuf->xdp->data,    [in nfp_nfd3_xsk_rx()]
   [all …]
|
/linux-6.14.4/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
    56:  * XDP requires more memory, than rx_buf provides.
   190:  * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
   192:  * @xdp: xdp_buff used as input to the XDP program
   193:  * @xdp_prog: XDP program to run
   197: static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,    [in i40e_run_xdp_zc(), argument]
   204: act = bpf_prog_run_xdp(xdp_prog, xdp);    [in i40e_run_xdp_zc()]
   207: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);    [in i40e_run_xdp_zc()]
   222: result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);    [in i40e_run_xdp_zc()]
   244: struct xdp_buff **xdp;    [in i40e_alloc_rx_buffers_zc(), local]
   249: xdp = i40e_rx_bi(rx_ring, ntu);    [in i40e_alloc_rx_buffers_zc()]
   [all …]
|