Lines Matching +full:foo +full:- +full:queue

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
20 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
36 /* This indicates where we are processing relative to skb->data. */
39 /* This is non-zero if the packet cannot be merged with the new skb. */
45 /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
59 /* This is non-zero if the packet may be of the same flow. */
74 /* Used in foo-over-udp, set in udp[46]_gro_receive */
103 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
108 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; in gro_recursion_inc_test()
117 NAPI_GRO_CB(skb)->flush |= 1; in call_gro_receive()
132 NAPI_GRO_CB(skb)->flush |= 1; in call_gro_receive_sk()
141 return NAPI_GRO_CB(skb)->data_offset; in skb_gro_offset()
146 return skb->len - NAPI_GRO_CB(skb)->data_offset; in skb_gro_len()
151 NAPI_GRO_CB(skb)->data_offset += len; in skb_gro_pull()
157 return NAPI_GRO_CB(skb)->frag0 + offset; in skb_gro_header_fast()
163 return likely(hlen <= NAPI_GRO_CB(skb)->frag0_len); in skb_gro_may_pull()
172 return skb->data + offset; in skb_gro_header_slow()
188 return NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark]; in skb_gro_receive_network_offset()
196 return skb->data + skb_gro_receive_network_offset(skb); in skb_gro_network_header()
204 return csum_tcpudp_nofold(iph->saddr, iph->daddr, in inet_gro_compute_pseudo()
211 if (NAPI_GRO_CB(skb)->csum_valid) in skb_gro_postpull_rcsum()
212 NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len, in skb_gro_postpull_rcsum()
213 wsum_negate(NAPI_GRO_CB(skb)->csum))); in skb_gro_postpull_rcsum()
225 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); in skb_at_gro_remcsum_start()
232 return ((skb->ip_summed != CHECKSUM_PARTIAL || in __skb_gro_checksum_validate_needed()
236 NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_validate_needed()
243 if (NAPI_GRO_CB(skb)->csum_valid && in __skb_gro_checksum_validate_complete()
244 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) in __skb_gro_checksum_validate_complete()
247 NAPI_GRO_CB(skb)->csum = psum; in __skb_gro_checksum_validate_complete()
254 if (NAPI_GRO_CB(skb)->csum_cnt > 0) { in skb_gro_incr_csum_unnecessary()
256 NAPI_GRO_CB(skb)->csum_cnt--; in skb_gro_incr_csum_unnecessary()
290 return (NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_convert_check()
291 !NAPI_GRO_CB(skb)->csum_valid); in __skb_gro_checksum_convert_check()
297 NAPI_GRO_CB(skb)->csum = ~pseudo; in __skb_gro_checksum_convert()
298 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_convert()
315 grc->offset = 0; in skb_gro_remcsum_init()
316 grc->delta = 0; in skb_gro_remcsum_init()
328 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); in skb_gro_remcsum_process()
331 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; in skb_gro_remcsum_process()
339 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, in skb_gro_remcsum_process()
342 /* Adjust skb->csum since we changed the packet */ in skb_gro_remcsum_process()
343 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); in skb_gro_remcsum_process()
345 grc->offset = off + hdrlen + offset; in skb_gro_remcsum_process()
346 grc->delta = delta; in skb_gro_remcsum_process()
355 size_t plen = grc->offset + sizeof(u16); in skb_gro_remcsum_cleanup()
357 if (!grc->delta) in skb_gro_remcsum_cleanup()
360 ptr = skb_gro_header(skb, plen, grc->offset); in skb_gro_remcsum_cleanup()
364 remcsum_unadjust((__sum16 *)ptr, grc->delta); in skb_gro_remcsum_cleanup()
370 if (PTR_ERR(pp) != -EINPROGRESS) in skb_gro_flush_final()
371 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
378 if (PTR_ERR(pp) != -EINPROGRESS) { in skb_gro_flush_final_remcsum()
379 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
381 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
387 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
394 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
396 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
418 NAPI_GRO_CB(skb)->flush |= 1, NULL : \
443 return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, in ip6_gro_compute_pseudo()
450 const u32 id = ntohl(*(__be32 *)&iph->id); in inet_gro_flush()
451 const u32 id2 = ntohl(*(__be32 *)&iph2->id); in inet_gro_flush()
452 const u16 ipid_offset = (id >> 16) - (id2 >> 16); in inet_gro_flush()
453 const u16 count = NAPI_GRO_CB(p)->count; in inet_gro_flush()
458 flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF)); in inet_gro_flush()
468 NAPI_GRO_CB(p)->ip_fixedid = true; in inet_gro_flush()
470 return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid); in inet_gro_flush()
480 (__force __be32)(iph->hop_limit ^ iph2->hop_limit)); in ipv6_gro_flush()
487 const void *nh = th - diff; in __gro_receive_network_flush()
488 const void *nh2 = th2 - diff; in __gro_receive_network_flush()
490 if (((struct iphdr *)nh)->version == 6) in __gro_receive_network_flush()
499 const bool encap_mark = NAPI_GRO_CB(p)->encap_mark; in gro_receive_network_flush()
503 flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark); in gro_receive_network_flush()
505 	flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false); in gro_receive_network_flush()
516 if (!napi->rx_count) in gro_normal_list()
518 netif_receive_skb_list_internal(&napi->rx_list); in gro_normal_list()
519 INIT_LIST_HEAD(&napi->rx_list); in gro_normal_list()
520 napi->rx_count = 0; in gro_normal_list()
523 /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
528 list_add_tail(&skb->list, &napi->rx_list); in gro_normal_one()
529 napi->rx_count += segs; in gro_normal_one()
530 if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch)) in gro_normal_one()
537 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
542 *iif = inet_iif(skb) ?: skb->dev->ifindex; in inet_get_iif_sdif()
546 if (netif_is_l3_slave(skb->dev)) { in inet_get_iif_sdif()
547 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); in inet_get_iif_sdif()
550 *iif = master ? master->ifindex : 0; in inet_get_iif_sdif()
558 * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
563 /* using skb->dev->ifindex because skb_dst(skb) is not initialized */ in inet6_get_iif_sdif()
564 *iif = skb->dev->ifindex; in inet6_get_iif_sdif()
568 if (netif_is_l3_slave(skb->dev)) { in inet6_get_iif_sdif()
569 struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev); in inet6_get_iif_sdif()
572 *iif = master ? master->ifindex : 0; in inet6_get_iif_sdif()