1 // SPDX-License-Identifier: GPL-2.0
28 /* ----------------------------------------------- */
29 /* IP-TFS default SA values (tunnel egress/dir-in) */
30 /* ----------------------------------------------- */
33 * define IPTFS_DEFAULT_DROP_TIME_USECS - default drop time
36 * time before a missing out-of-order IPTFS tunnel packet is considered lost.
44 * define IPTFS_DEFAULT_REORDER_WINDOW - default reorder window size
54 /* ------------------------------------------------ */
55 /* IPTFS default SA values (tunnel ingress/dir-out) */
56 /* ------------------------------------------------ */
59 * define IPTFS_DEFAULT_INIT_DELAY_USECS - default initial output delay
70 * define IPTFS_DEFAULT_MAX_QUEUE_SIZE - default max output queue size.
80 /* Assumed: skb->head is cache aligned.
82 * L2 Header resv: Arrange for cacheline to start at skb->data - 16 to keep the
83 * to-be-pushed L2 header in the same cacheline as resulting `skb->data` (i.e.,
84 * the L3 header). If cacheline size is > 64 then skb->data + pushed L2 will all
87 * L3 Header resv: For L3+L2 headers (i.e., skb->data points at the IPTFS payload)
88 * we want `skb->data` to be cacheline aligned and all pushed L2L3 headers will
91 * with the start of the IPTFS payload (skb->data).
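
As a rough editorial illustration of the reservation math described above (a standalone userspace model, not the kernel allocation path; the 64-byte cacheline and 14-byte Ethernet L2 header are assumed values), reserving headroom that leaves skb->data 16 bytes into a cacheline keeps a pushed L2 header in the same line as the start of the L3 header:

  #include <stdio.h>

  #define CACHELINE 64            /* assumed cacheline size */
  #define L2_HLEN   14            /* assumed Ethernet header length */

  int main(void)
  {
          /* Model: skb->head is cacheline aligned, so offsets from head
           * translate directly into cacheline indexes.
           */
          unsigned int resv = CACHELINE + 16;       /* reserved headroom */
          unsigned int data_off = resv;             /* skb->data offset  */
          unsigned int l2_off = data_off - L2_HLEN; /* pushed L2 header  */

          printf("L3 in cacheline %u, pushed L2 in cacheline %u\n",
                 data_off / CACHELINE, l2_off / CACHELINE);
          return 0;
  }
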
104 * struct xfrm_iptfs_config - configuration for the IPTFS tunnel.
127 * struct xfrm_iptfs_data - mode specific xfrm state.
190 if (iph->version == 4) in __trace_ip_proto()
191 return iph->protocol; in __trace_ip_proto()
192 return ((struct ipv6hdr *)iph)->nexthdr; in __trace_ip_proto()
200 if (iph->version == 4) { in __trace_ip_proto_seq()
202 protocol = iph->protocol; in __trace_ip_proto_seq()
203 } else if (iph->version == 6) { in __trace_ip_proto_seq()
205 protocol = ((struct ipv6hdr *)(iph))->nexthdr; in __trace_ip_proto_seq()
209 return ntohs(((struct icmphdr *)nexthdr)->un.echo.sequence); in __trace_ip_proto_seq()
211 return ntohs(((struct icmp6hdr *)nexthdr)->icmp6_sequence); in __trace_ip_proto_seq()
213 return ntohl(((struct tcphdr *)nexthdr)->seq); in __trace_ip_proto_seq()
215 return ntohs(((struct udphdr *)nexthdr)->source); in __trace_ip_proto_seq()
224 u64 seq = ntohl(XFRM_SKB_CB(skb)->seq.input.low); in __esp_seq()
226 return seq | (u64)ntohl(XFRM_SKB_CB(skb)->seq.input.hi) << 32; in __esp_seq()
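
For readers skimming the listing, here is a minimal standalone model of the 64-bit ESN composition performed by __esp_seq() above; the sample values are invented and ntohl()/htonl() come from the usual userspace headers:

  #include <stdint.h>
  #include <stdio.h>
  #include <arpa/inet.h>

  /* Compose the 64-bit ESP sequence number from the two 32-bit halves
   * stored in network byte order: low half in bits 0-31, high half in
   * bits 32-63, mirroring __esp_seq() above.
   */
  static uint64_t esp_seq64(uint32_t be_low, uint32_t be_hi)
  {
          uint64_t seq = ntohl(be_low);

          return seq | (uint64_t)ntohl(be_hi) << 32;
  }

  int main(void)
  {
          /* hypothetical halves: hi = 1, low = 2 -> 0x100000002 */
          printf("0x%llx\n", (unsigned long long)esp_seq64(htonl(2), htonl(1)));
          return 0;
  }
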
234 * iptfs_alloc_skb() - Allocate a new `skb`.
235 * @tpl: the skb to copy required meta-data from.
239 * A new `skb` is allocated and required meta-data is copied from `tpl`, the
244 * `skb->data - 16` which is a good guess for good cache alignment (placing the
248 * dst->dev plus the calculated L3 overhead for the xfrm dst or
269 resv = LL_RESERVED_SPACE(dst->dev) + dst->header_len; in iptfs_alloc_skb()
282 skb->dev = tpl->dev; in iptfs_alloc_skb()
293 * iptfs_skb_head_to_frag() - initialize a skb_frag_t based on skb head data
299 struct page *page = virt_to_head_page(skb->data); in iptfs_skb_head_to_frag()
302 skb_frag_fill_page_desc(frag, page, skb->data - addr, skb_headlen(skb)); in iptfs_skb_head_to_frag()
306 * struct iptfs_skb_frag_walk - use to track a walk through fragments
313 * @pp_recycle: copy of skb->pp_recycle
326 * iptfs_skb_prepare_frag_walk() - initialize a frag walk over an skb.
341 walk->initial_offset = initial_offset; in iptfs_skb_prepare_frag_walk()
342 walk->fragi = 0; in iptfs_skb_prepare_frag_walk()
343 walk->past = 0; in iptfs_skb_prepare_frag_walk()
344 walk->total = 0; in iptfs_skb_prepare_frag_walk()
345 walk->nr_frags = 0; in iptfs_skb_prepare_frag_walk()
346 walk->pp_recycle = skb->pp_recycle; in iptfs_skb_prepare_frag_walk()
348 if (skb->head_frag) { in iptfs_skb_prepare_frag_walk()
350 initial_offset -= skb_headlen(skb); in iptfs_skb_prepare_frag_walk()
352 frag = &walk->frags[walk->nr_frags++]; in iptfs_skb_prepare_frag_walk()
354 frag->offset += initial_offset; in iptfs_skb_prepare_frag_walk()
355 frag->len -= initial_offset; in iptfs_skb_prepare_frag_walk()
356 walk->total += frag->len; in iptfs_skb_prepare_frag_walk()
360 initial_offset -= skb_headlen(skb); in iptfs_skb_prepare_frag_walk()
363 for (i = 0; i < shinfo->nr_frags; i++) { in iptfs_skb_prepare_frag_walk()
364 from = &shinfo->frags[i]; in iptfs_skb_prepare_frag_walk()
365 if (initial_offset >= from->len) { in iptfs_skb_prepare_frag_walk()
366 initial_offset -= from->len; in iptfs_skb_prepare_frag_walk()
369 frag = &walk->frags[walk->nr_frags++]; in iptfs_skb_prepare_frag_walk()
372 frag->offset += initial_offset; in iptfs_skb_prepare_frag_walk()
373 frag->len -= initial_offset; in iptfs_skb_prepare_frag_walk()
376 walk->total += frag->len; in iptfs_skb_prepare_frag_walk()
384 offset -= walk->initial_offset; in iptfs_skb_reset_frag_walk()
387 while (offset < walk->past) { in iptfs_skb_reset_frag_walk()
388 walk->past -= walk->frags[--walk->fragi].len; in iptfs_skb_reset_frag_walk()
389 if (offset >= walk->past) in iptfs_skb_reset_frag_walk()
392 while (offset >= walk->past + walk->frags[walk->fragi].len) in iptfs_skb_reset_frag_walk()
393 walk->past += walk->frags[walk->fragi++].len; in iptfs_skb_reset_frag_walk()
396 offset -= walk->past; in iptfs_skb_reset_frag_walk()
401 * iptfs_skb_can_add_frags() - check if ok to add frags from walk to skb
416 if (skb_has_frag_list(skb) || skb->pp_recycle != walk->pp_recycle) in iptfs_skb_can_add_frags()
423 fragi = walk->fragi; in iptfs_skb_can_add_frags()
424 nr_frags = shinfo->nr_frags; in iptfs_skb_can_add_frags()
425 while (len && fragi < walk->nr_frags) { in iptfs_skb_can_add_frags()
426 skb_frag_t *frag = &walk->frags[fragi]; in iptfs_skb_can_add_frags()
428 fraglen = frag->len; in iptfs_skb_can_add_frags()
430 fraglen -= offset; in iptfs_skb_can_add_frags()
437 len -= fraglen; in iptfs_skb_can_add_frags()
445 * iptfs_skb_add_frags() - add a range of fragment references into an skb
465 if (!walk->nr_frags || offset >= walk->total + walk->initial_offset) in iptfs_skb_add_frags()
471 while (len && walk->fragi < walk->nr_frags) { in iptfs_skb_add_frags()
472 skb_frag_t *frag = &walk->frags[walk->fragi]; in iptfs_skb_add_frags()
473 skb_frag_t *tofrag = &shinfo->frags[shinfo->nr_frags]; in iptfs_skb_add_frags()
477 tofrag->offset += offset; in iptfs_skb_add_frags()
478 tofrag->len -= offset; in iptfs_skb_add_frags()
482 shinfo->nr_frags++; in iptfs_skb_add_frags()
485 fraglen = tofrag->len; in iptfs_skb_add_frags()
487 tofrag->len = len; in iptfs_skb_add_frags()
488 skb->len += len; in iptfs_skb_add_frags()
489 skb->data_len += len; in iptfs_skb_add_frags()
493 len -= fraglen; /* careful, use dst bv_len */ in iptfs_skb_add_frags()
494 skb->len += fraglen; /* careful, " " " */ in iptfs_skb_add_frags()
495 skb->data_len += fraglen; /* careful, " " " */ in iptfs_skb_add_frags()
496 walk->past += frag->len; /* careful, use src bv_len */ in iptfs_skb_add_frags()
497 walk->fragi++; in iptfs_skb_add_frags()
514 * iptfs_pskb_add_frags() - Create and add frags into a new sk_buff.
527 * frag walk for the remaining @len of data (i.e., @len - @copy_len bytes).
543 len - copy_len)) { in iptfs_pskb_add_frags()
550 XFRM_INC_STATS(dev_net(st->root_skb->dev), in iptfs_pskb_add_frags()
556 iptfs_skb_add_frags(skb, walk, off + copy_len, len - copy_len); in iptfs_pskb_add_frags()
561 * iptfs_pskb_extract_seq() - Create and load data into a new sk_buff.
568 * Create a new sk_buff `skb` with @skblen of packet data space. If non-zero,
579 struct sk_buff *skb = iptfs_alloc_skb(st->root_skb, skblen, false); in iptfs_pskb_extract_seq()
584 XFRM_INC_STATS(dev_net(st->root_skb->dev), LINUX_MIB_XFRMINERROR); in iptfs_pskb_extract_seq()
592 * iptfs_input_save_runt() - save data in xtfs runt space.
604 memcpy(xtfs->ra_runt, buf, len); in iptfs_input_save_runt()
606 xtfs->ra_runtlen = len; in iptfs_input_save_runt()
607 xtfs->ra_wantseq = seq + 1; in iptfs_input_save_runt()
611 * __iptfs_iphlen() - return the v4/v6 header length using packet data.
622 if (iph->version == 0x4) in __iptfs_iphlen()
628 * __iptfs_iplen() - return the v4/v6 length using packet data.
642 if (iph->version == 0x4) in __iptfs_iplen()
643 return ntohs(iph->tot_len); in __iptfs_iplen()
644 return ntohs(((struct ipv6hdr *)iph)->payload_len) + in __iptfs_iplen()
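
The IPv6 branch of __iptfs_iplen() is truncated in this listing at line 644. Assuming the elided continuation adds back the fixed IPv6 header length (payload_len excludes the 40-byte header, unlike IPv4's tot_len), a self-contained userspace model of the helper would be:

  #include <stdint.h>
  #include <arpa/inet.h>
  #include <linux/ip.h>      /* struct iphdr   */
  #include <linux/ipv6.h>    /* struct ipv6hdr */

  /* Total inner-packet length taken from the first bytes of packet data. */
  static uint32_t iptfs_iplen_model(const void *data)
  {
          const struct iphdr *iph = data;

          if (iph->version == 4)
                  return ntohs(iph->tot_len);
          /* assumption: the elided code adds sizeof(struct ipv6hdr) here */
          return ntohs(((const struct ipv6hdr *)data)->payload_len) +
                 sizeof(struct ipv6hdr);
  }
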
649 * iptfs_complete_inner_skb() - finish preparing the inner packet for gro recv.
668 skb->ip_summed = CHECKSUM_NONE; in iptfs_complete_inner_skb()
674 if (ip_hdr(skb)->version == 0x4) { in iptfs_complete_inner_skb()
677 if (x->props.flags & XFRM_STATE_DECAP_DSCP) in iptfs_complete_inner_skb()
678 ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph); in iptfs_complete_inner_skb()
679 if (!(x->props.flags & XFRM_STATE_NOECN)) in iptfs_complete_inner_skb()
680 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) in iptfs_complete_inner_skb()
683 skb->protocol = htons(ETH_P_IP); in iptfs_complete_inner_skb()
687 if (x->props.flags & XFRM_STATE_DECAP_DSCP) in iptfs_complete_inner_skb()
688 ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, iph); in iptfs_complete_inner_skb()
689 if (!(x->props.flags & XFRM_STATE_NOECN)) in iptfs_complete_inner_skb()
690 if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) in iptfs_complete_inner_skb()
693 skb->protocol = htons(ETH_P_IPV6); in iptfs_complete_inner_skb()
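
To summarize the decapsulation TOS handling shown above: the outer header's DSCP may be copied onto the inner packet (XFRM_STATE_DECAP_DSCP), and an outer CE mark is propagated unless ECN handling is disabled (XFRM_STATE_NOECN). A minimal model follows, with the caveat that the real kernel helpers (ipv4_copy_dscp()/ipv6_copy_dscp() and the CE-setting calls on the elided lines) also skip Not-ECT packets and fix up checksums:

  #include <stdbool.h>
  #include <stdint.h>

  #define ECN_MASK 0x3            /* low two TOS bits          */
  #define ECN_CE   0x3            /* congestion experienced    */

  static void decap_tos(uint8_t outer_tos, uint8_t *inner_tos,
                        bool decap_dscp, bool noecn)
  {
          if (decap_dscp)         /* copy DSCP, keep inner ECN bits */
                  *inner_tos = (*inner_tos & ECN_MASK) | (outer_tos & ~ECN_MASK);
          if (!noecn && (outer_tos & ECN_MASK) == ECN_CE)
                  *inner_tos |= ECN_CE;   /* propagate CE to the inner header */
  }
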
699 assert_spin_locked(&xtfs->drop_lock); in __iptfs_reassem_done()
702 hrtimer_try_to_cancel(&xtfs->drop_timer); in __iptfs_reassem_done()
704 kfree_skb(xtfs->ra_newskb); in __iptfs_reassem_done()
705 xtfs->ra_newskb = NULL; in __iptfs_reassem_done()
709 * iptfs_reassem_abort() - In-progress packet is aborted; free the state.
718 * iptfs_reassem_done() - In-progress packet is complete, clear the state.
727 * iptfs_reassem_cont() - Continue the reassembly of an inner packet.
736 * Process an IPTFS payload that has a non-zero `blkoff` or when we are
737 * expecting the continuation b/c we have a runt or in-progress packet.
747 struct sk_buff *newskb = xtfs->ra_newskb; in iptfs_reassem_cont()
748 u32 remaining = skb->len - data; in iptfs_reassem_cont()
749 u32 runtlen = xtfs->ra_runtlen; in iptfs_reassem_cont()
753 if (!runtlen && !xtfs->ra_newskb) in iptfs_reassem_cont()
772 if (seq < xtfs->ra_wantseq) in iptfs_reassem_cont()
776 if (seq > xtfs->ra_wantseq) { in iptfs_reassem_cont()
777 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR); in iptfs_reassem_cont()
782 if ((*skb->data & 0xF0) != 0) { in iptfs_reassem_cont()
783 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
790 xtfs->ra_wantseq++; in iptfs_reassem_cont()
797 xtfs->ra_runtlen = 0; in iptfs_reassem_cont()
803 rrem = sizeof(xtfs->ra_runt) - runtlen; in iptfs_reassem_cont()
805 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
811 if (skb_copy_seq_read(st, data, &xtfs->ra_runt[runtlen], in iptfs_reassem_cont()
813 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
821 ipremain = __iptfs_iplen(xtfs->ra_runt); in iptfs_reassem_cont()
822 if (ipremain < sizeof(xtfs->ra_runt)) { in iptfs_reassem_cont()
824 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
835 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINERROR); in iptfs_reassem_cont()
838 xtfs->ra_newskb = newskb; in iptfs_reassem_cont()
841 * pointers the same as normal non-runt case. The extra `rrem` in iptfs_reassem_cont()
845 memcpy(skb_put(newskb, runtlen), xtfs->ra_runt, in iptfs_reassem_cont()
846 sizeof(xtfs->ra_runt)); in iptfs_reassem_cont()
850 ipremain = __iptfs_iplen(newskb->data); in iptfs_reassem_cont()
851 iphlen = __iptfs_iphlen(newskb->data); in iptfs_reassem_cont()
853 ipremain -= newskb->len; in iptfs_reassem_cont()
856 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMINIPTFSERROR); in iptfs_reassem_cont()
861 if (newskb->len < iphlen) { in iptfs_reassem_cont()
862 iphremain = iphlen - newskb->len; in iptfs_reassem_cont()
864 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
872 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
878 xtfs->ra_wantseq++; in iptfs_reassem_cont()
883 blkoff -= copylen; in iptfs_reassem_cont()
884 remaining -= copylen; in iptfs_reassem_cont()
885 ipremain -= copylen; in iptfs_reassem_cont()
893 (skb->head_frag || skb->len == skb->data_len) && in iptfs_reassem_cont()
894 skb->pp_recycle == newskb->pp_recycle) { in iptfs_reassem_cont()
907 XFRM_INC_STATS(xs_net(xtfs->x), in iptfs_reassem_cont()
914 xtfs->ra_wantseq++; in iptfs_reassem_cont()
918 iptfs_complete_inner_skb(xtfs->x, newskb); in iptfs_reassem_cont()
919 list_add_tail(&newskb->list, list); in iptfs_reassem_cont()
926 if (xtfs->ra_newskb) { in iptfs_reassem_cont()
929 xtfs->ra_runtlen = 0; in iptfs_reassem_cont()
930 xtfs->ra_wantseq = 0; in iptfs_reassem_cont()
952 xtfs = x->mode_data; in __input_process_payload()
954 skb = skbseq->root_skb; in __input_process_payload()
965 tail = skb->len; in __input_process_payload()
972 remaining = tail - data; in __input_process_payload()
982 if (iph->version == 0x4) { in __input_process_payload()
992 iplen = be16_to_cpu(iph->tot_len); in __input_process_payload()
993 iphlen = iph->ihl << 2; in __input_process_payload()
995 XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = iph->tos; in __input_process_payload()
996 } else if (iph->version == 0x6) { in __input_process_payload()
1006 iplen = be16_to_cpu(((struct ipv6hdr *)hbytes)->payload_len); in __input_process_payload()
1010 XFRM_MODE_SKB_CB(skbseq->root_skb)->tos = in __input_process_payload()
1012 } else if (iph->version == 0x0) { in __input_process_payload()
1021 if (unlikely(skbseq->stepped_offset)) { in __input_process_payload()
1025 struct sk_buff *save = skbseq->root_skb; in __input_process_payload()
1047 /* make sure our header is 32-bit aligned? */ in __input_process_payload()
1048 /* ((uintptr_t)(skb->data + data) & 0x3) == 0 && */ in __input_process_payload()
1049 skb_tailroom(skb) + tail - data >= iplen) { in __input_process_payload()
1061 * For non-linear skbs tailroom is 0 and so we only in __input_process_payload()
1062 * re-use if the entire packet is present in __input_process_payload()
1070 * above that the header lies on a 32-bit in __input_process_payload()
1077 tail = skb->len; in __input_process_payload()
1078 remaining = skb->len; in __input_process_payload()
1080 skb->protocol = protocol; in __input_process_payload()
1082 if (skb->mac_len) in __input_process_payload()
1083 eth_hdr(skb)->h_proto = skb->protocol; in __input_process_payload()
1088 } else if (skb->head_frag && in __input_process_payload()
1127 skb->protocol = protocol; in __input_process_payload()
1130 skb_set_mac_header(skb, -first_skb->mac_len); in __input_process_payload()
1131 memcpy(skb_mac_header(skb), old_mac, first_skb->mac_len); in __input_process_payload()
1132 eth_hdr(skb)->h_proto = skb->protocol; in __input_process_payload()
1138 if (skb->len < iplen) { in __input_process_payload()
1140 spin_lock(&xtfs->drop_lock); in __input_process_payload()
1142 xtfs->ra_newskb = skb; in __input_process_payload()
1143 xtfs->ra_wantseq = seq + 1; in __input_process_payload()
1144 if (!hrtimer_is_queued(&xtfs->drop_timer)) { in __input_process_payload()
1146 hrtimer_start(&xtfs->drop_timer, in __input_process_payload()
1147 xtfs->drop_time_ns, in __input_process_payload()
1151 spin_unlock(&xtfs->drop_lock); in __input_process_payload()
1157 list_add_tail(&skb->list, sublist); in __input_process_payload()
1164 if (first_skb && first_iplen && !defer && first_skb != xtfs->ra_newskb) { in __input_process_payload()
1168 list_del(&first_skb->list); in __input_process_payload()
1171 first_skb->ip_summed = CHECKSUM_NONE; in __input_process_payload()
1177 if (xfrm_input(skb, 0, 0, -2)) in __input_process_payload()
1181 skb = skbseq->root_skb; in __input_process_payload()
1198 * iptfs_input_ordered() - handle next in order IPTFS payload.
1216 xtfs = x->mode_data; in iptfs_input_ordered()
1224 skb_prepare_seq_read(skb, 0, skb->len, &skbseq); in iptfs_input_ordered()
1226 /* Get the IPTFS header and validate it */ in iptfs_input_ordered()
1234 trace_iptfs_egress_recv(skb, xtfs, be16_to_cpu(ipth->block_offset)); in iptfs_input_ordered()
1237 if (ipth->subtype == IPTFS_SUBTYPE_CC) { in iptfs_input_ordered()
1239 remaining = sizeof(iptcch) - sizeof(*ipth); in iptfs_input_ordered()
1245 } else if (ipth->subtype != IPTFS_SUBTYPE_BASIC) { in iptfs_input_ordered()
1250 if (ipth->flags != 0) { in iptfs_input_ordered()
1259 blkoff = ntohs(ipth->block_offset); in iptfs_input_ordered()
1261 if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) { in iptfs_input_ordered()
1262 spin_lock(&xtfs->drop_lock); in iptfs_input_ordered()
1265 if (blkoff || xtfs->ra_runtlen || xtfs->ra_newskb) { in iptfs_input_ordered()
1270 spin_unlock(&xtfs->drop_lock); in iptfs_input_ordered()
1283 /* ------------------------------- */
1284 /* Input (Egress) Re-ordering Code */
1285 /* ------------------------------- */
1289 u32 savedlen = xtfs->w_savedlen; in __vec_shift()
1294 memcpy(xtfs->w_saved, xtfs->w_saved + shift, in __vec_shift()
1295 (savedlen - shift) * sizeof(*xtfs->w_saved)); in __vec_shift()
1296 memset(xtfs->w_saved + savedlen - shift, 0, in __vec_shift()
1297 shift * sizeof(*xtfs->w_saved)); in __vec_shift()
1298 xtfs->w_savedlen -= shift; in __vec_shift()
1304 list_add_tail(&inskb->list, freelist); in __reorder_past()
1311 const u32 savedlen = xtfs->w_savedlen; in __reorder_drop()
1316 if (xtfs->w_saved[0].drop_time > now) in __reorder_drop()
1319 ++xtfs->w_wantseq; in __reorder_drop()
1322 s = xtfs->w_saved; in __reorder_drop()
1326 for (; s < se && !s->skb; s++) { in __reorder_drop()
1327 if (s->drop_time > now) in __reorder_drop()
1331 for (; s < se && s->skb; scount++, s++) in __reorder_drop()
1332 list_add_tail(&s->skb->list, list); in __reorder_drop()
1336 count = s - xtfs->w_saved; in __reorder_drop()
1338 xtfs->w_wantseq += count; in __reorder_drop()
1344 if (xtfs->w_savedlen) { in __reorder_drop()
1347 hrtimer_start(&xtfs->drop_timer, in __reorder_drop()
1348 xtfs->w_saved[0].drop_time - now, in __reorder_drop()
1358 const u32 savedlen = xtfs->w_savedlen; in __reorder_this()
1362 list_add_tail(&inskb->list, list); in __reorder_this()
1363 ++xtfs->w_wantseq; in __reorder_this()
1370 for (s = xtfs->w_saved, se = s + savedlen; s < se && s->skb; s++) in __reorder_this()
1371 list_add_tail(&s->skb->list, list); in __reorder_this()
1372 count = s - xtfs->w_saved; in __reorder_this()
1374 xtfs->w_wantseq += count; in __reorder_this()
1385 const u32 savedlen = xtfs->w_savedlen; in iptfs_set_window_drop_times()
1386 struct skb_wseq *s = xtfs->w_saved; in iptfs_set_window_drop_times()
1389 assert_spin_locked(&xtfs->drop_lock); in iptfs_set_window_drop_times()
1397 drop_time += xtfs->drop_time_ns; in iptfs_set_window_drop_times()
1401 while (index-- > 0 && !s[index].skb) in iptfs_set_window_drop_times()
1405 if (index == -1 && !hrtimer_is_queued(&xtfs->drop_timer)) in iptfs_set_window_drop_times()
1406 hrtimer_start(&xtfs->drop_timer, xtfs->drop_time_ns, in iptfs_set_window_drop_times()
1415 const u64 wantseq = xtfs->w_wantseq; in __reorder_future_fits()
1416 const u64 distance = inseq - wantseq; in __reorder_future_fits()
1417 const u32 savedlen = xtfs->w_savedlen; in __reorder_future_fits()
1418 const u32 index = distance - 1; in __reorder_future_fits()
1430 * index is an array index (i.e., slot - 1) in __reorder_future_fits()
1431 * : : - implicit NULL after array len in __reorder_future_fits()
1433 * +--------- used length (savedlen == 2) in __reorder_future_fits()
1434 * | +----- array size (nslots - 1 == 3) in __reorder_future_fits()
1439 * --- 0 1 2 | array index in __reorder_future_fits()
1440 * [-] [b] : :| array in __reorder_future_fits()
1445 * distance == 3 [inseq(5) - w_wantseq(2)] in __reorder_future_fits()
1446 * index == 2 [distance(3) - 1] in __reorder_future_fits()
1449 if (xtfs->w_saved[index].skb) { in __reorder_future_fits()
1451 list_add_tail(&inskb->list, freelist); in __reorder_future_fits()
1455 xtfs->w_saved[index].skb = inskb; in __reorder_future_fits()
1456 xtfs->w_savedlen = max(savedlen, index + 1); in __reorder_future_fits()
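
To make the slot arithmetic above concrete, here is a small userspace model of the "fits inside the window" case; field names mirror the listing, the window size is an arbitrary example, and the drop-time bookkeeping (iptfs_set_window_drop_times()) is omitted:

  #include <stdbool.h>
  #include <stdint.h>

  struct slot { void *skb; };

  struct window {
          uint64_t wantseq;       /* next in-order sequence number    */
          uint32_t savedlen;      /* used length of saved[]           */
          struct slot saved[3];   /* reorder_win_size slots (example) */
  };

  /* Save an out-of-order packet whose distance from wantseq fits in the
   * window (caller guarantees distance <= number of slots): the slot
   * index is distance - 1, and duplicates are rejected so the caller
   * can free them.
   */
  static bool reorder_future_fits(struct window *w, uint64_t inseq, void *pkt)
  {
          uint64_t index = (inseq - w->wantseq) - 1;

          if (w->saved[index].skb)
                  return false;
          w->saved[index].skb = pkt;
          if (index + 1 > w->savedlen)
                  w->savedlen = index + 1;
          return true;
  }
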
1464 const u32 nslots = xtfs->cfg.reorder_win_size + 1; in __reorder_future_shifts()
1466 u32 savedlen = xtfs->w_savedlen; in __reorder_future_shifts()
1467 u64 wantseq = xtfs->w_wantseq; in __reorder_future_shifts()
1484 * the final slot at savedlen (index savedlen - 1) is always occupied. in __reorder_future_shifts()
1488 * +--------- used length (savedlen == 2) in __reorder_future_shifts()
1489 * | +----- array size (nslots - 1 == 3) in __reorder_future_shifts()
1490 * | | +- window boundary (nslots == 4) in __reorder_future_shifts()
1494 * --- 0 1 2 | array index in __reorder_future_shifts()
1500 * distance == 4 [inseq(6) - w_wantseq(2)] in __reorder_future_shifts()
1502 * index == 3 [distance(4) - 1] in __reorder_future_shifts()
1503 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))] in __reorder_future_shifts()
1507 * +--- window boundary (nslots == 4) in __reorder_future_shifts()
1509 * --- 0 1 2 | 3 array index in __reorder_future_shifts()
1514 * distance == 4 [inseq(6) - w_wantseq(2)] in __reorder_future_shifts()
1516 * index == 3 [distance(4) - 1] in __reorder_future_shifts()
1517 * beyond == 1 [newslot(4) - lastslot((nslots(4) - 1))] in __reorder_future_shifts()
1521 * +-- window boundary (nslots == 4) in __reorder_future_shifts()
1523 * --- 0 1 2 | 3 4 5 array index in __reorder_future_shifts()
1524 * [-] [c] : :| array in __reorder_future_shifts()
1528 * iter 1: slot0 == NULL, missed++, lastdrop = 2 (2+1-1), slot0 = [-] in __reorder_future_shifts()
1529 * iter 2: slot0 == NULL, missed++, lastdrop = 3 (2+2-1), slot0 = [c] in __reorder_future_shifts()
1530 * 2 < 3, extra = 1 (3-2), missed += extra, lastdrop = 4 (2+2+1-1) in __reorder_future_shifts()
1533 * distance == 6 [inseq(8) - w_wantseq(2)] in __reorder_future_shifts()
1535 * index == 5 [distance(6) - 1] in __reorder_future_shifts()
1536 * beyond == 3 [newslot(6) - lastslot((nslots(4) - 1))] in __reorder_future_shifts()
1546 distance = inseq - wantseq; in __reorder_future_shifts()
1547 beyond = distance - (nslots - 1); in __reorder_future_shifts()
1554 wnext = xtfs->w_saved; in __reorder_future_shifts()
1558 list_add_tail(&slot0->list, list); in __reorder_future_shifts()
1559 slot0 = wnext->skb; in __reorder_future_shifts()
1560 wnext->skb = NULL; in __reorder_future_shifts()
1570 * non-NULL b/c we shifted the final element, which is always set if in __reorder_future_shifts()
1575 list_add_tail(&slot0->list, list); in __reorder_future_shifts()
1584 xtfs->w_wantseq += beyond; in __reorder_future_shifts()
1589 xtfs->w_savedlen = nslots - 1; in __reorder_future_shifts()
1590 xtfs->w_saved[xtfs->w_savedlen - 1].skb = inskb; in __reorder_future_shifts()
1591 iptfs_set_window_drop_times(xtfs, xtfs->w_savedlen - 1); in __reorder_future_shifts()
1610 const u32 nslots = xtfs->cfg.reorder_win_size + 1; in iptfs_input_reorder()
1614 assert_spin_locked(&xtfs->drop_lock); in iptfs_input_reorder()
1616 if (unlikely(!xtfs->w_seq_set)) { in iptfs_input_reorder()
1617 xtfs->w_seq_set = true; in iptfs_input_reorder()
1618 xtfs->w_wantseq = inseq; in iptfs_input_reorder()
1620 wantseq = xtfs->w_wantseq; in iptfs_input_reorder()
1626 else if ((inseq - wantseq) < nslots) in iptfs_input_reorder()
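
The classification implied by the fragments of iptfs_input_reorder() above can be sketched as follows; the first two branches are inferred from the surrounding helpers (__reorder_past(), __reorder_this()) and should be checked against the full source:

  #include <stdint.h>

  enum reorder_case { R_PAST, R_THIS, R_FUTURE_FITS, R_FUTURE_SHIFTS };

  /* nslots == cfg.reorder_win_size + 1, as in the listing. */
  static enum reorder_case classify(uint64_t inseq, uint64_t wantseq,
                                    uint32_t nslots)
  {
          if (inseq < wantseq)
                  return R_PAST;            /* old/duplicate: freed        */
          if (inseq == wantseq)
                  return R_THIS;            /* in order: deliver and drain */
          if ((inseq - wantseq) < nslots)
                  return R_FUTURE_FITS;     /* store in w_saved[]          */
          return R_FUTURE_SHIFTS;           /* advance window, shift slots */
  }
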
1633 * iptfs_drop_timer() - Handle drop timer expiry.
1645 * then D(n-1) <= D(n).
1663 x = xtfs->x; in iptfs_drop_timer()
1667 spin_lock(&xtfs->drop_lock); in iptfs_drop_timer()
1670 skb = xtfs->ra_newskb; in iptfs_drop_timer()
1671 xtfs->ra_newskb = NULL; in iptfs_drop_timer()
1676 count = xtfs->w_savedlen ? __reorder_drop(xtfs, &list) : 0; in iptfs_drop_timer()
1678 spin_unlock(&xtfs->drop_lock); in iptfs_drop_timer()
1694 * iptfs_input() - handle receipt of iptfs payload
1701 * Return: -EINPROGRESS to inform xfrm_input to stop processing the skb.
1706 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_input()
1710 if (xtfs->cfg.reorder_win_size == 0) { in iptfs_input()
1715 /* Fetch list of in-order packets from the reordering window as well as in iptfs_input()
1721 spin_lock(&xtfs->drop_lock); in iptfs_input()
1723 spin_unlock(&xtfs->drop_lock); in iptfs_input()
1735 /* We always have dealt with the input SKB, either we are re-using it, in iptfs_input()
1739 return -EINPROGRESS; in iptfs_input()
1746 /* ------------------------- */
1748 /* ------------------------- */
1751 * iptfs_enqueue() - enqueue packet if ok to send.
1759 u64 newsz = xtfs->queue_size + skb->len; in iptfs_enqueue()
1762 assert_spin_locked(&xtfs->x->lock); in iptfs_enqueue()
1764 if (newsz > xtfs->cfg.max_queue_size) in iptfs_enqueue()
1768 if (newsz > xtfs->ecn_queue_size) { in iptfs_enqueue()
1770 if (iph->version == 4) in iptfs_enqueue()
1772 else if (iph->version == 6) in iptfs_enqueue()
1776 __skb_queue_tail(&xtfs->queue, skb); in iptfs_enqueue()
1777 xtfs->queue_size += skb->len; in iptfs_enqueue()
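
The queueing policy above amounts to a hard byte limit plus an earlier ECN threshold. A compact userspace model follows; the CE marking itself is done by the elided per-family calls on the inner header, presumably the IP_ECN_set_ce()/IP6_ECN_set_ce() helpers:

  #include <stdbool.h>
  #include <stdint.h>

  struct tfs_queue {
          uint64_t queue_size;      /* bytes currently queued         */
          uint64_t max_queue_size;  /* hard limit: refuse above this  */
          uint64_t ecn_queue_size;  /* soft limit: CE-mark above this */
  };

  /* Returns false if the packet must be dropped; *mark_ce tells the
   * caller to set CE on the inner IPv4/IPv6 header before queueing.
   */
  static bool enqueue_ok(struct tfs_queue *q, uint32_t pkt_len, bool *mark_ce)
  {
          uint64_t newsz = q->queue_size + pkt_len;

          *mark_ce = false;
          if (newsz > q->max_queue_size)
                  return false;
          if (newsz > q->ecn_queue_size)
                  *mark_ce = true;
          q->queue_size = newsz;
          return true;
  }
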
1785 u32 payload_mtu = xtfs->payload_mtu; in iptfs_get_cur_pmtu()
1786 u32 pmtu = __iptfs_get_inner_mtu(x, xdst->child_mtu_cached); in iptfs_get_cur_pmtu()
1796 if (skb->len <= pmtu) in iptfs_is_too_big()
1800 * don't-fragment. in iptfs_is_too_big()
1802 if (skb->dev) in iptfs_is_too_big()
1803 XFRM_INC_STATS(dev_net(skb->dev), LINUX_MIB_XFRMOUTERROR); in iptfs_is_too_big()
1807 else if (ip_hdr(skb)->version == 4) in iptfs_is_too_big()
1817 * This is set in dst->output for an SA.
1822 struct xfrm_state *x = dst->xfrm; in iptfs_output_collect()
1823 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_output_collect()
1829 /* We have hooked into dst_entry->output which means we have skipped the in iptfs_output_collect()
1835 * changing the skb->dst entry which then may not be xfrm based anymore in iptfs_output_collect()
1844 if (xtfs->cfg.dont_frag) in iptfs_output_collect()
1861 return -EINVAL; in iptfs_output_collect()
1870 spin_lock_bh(&x->lock); in iptfs_output_collect()
1889 if (xtfs->cfg.dont_frag && iptfs_is_too_big(sk, skb, pmtu)) { in iptfs_output_collect()
1904 if (!hrtimer_is_queued(&xtfs->iptfs_timer)) { in iptfs_output_collect()
1905 hrtimer_start(&xtfs->iptfs_timer, xtfs->init_delay_ns, IPTFS_HRTIMER_MODE); in iptfs_output_collect()
1906 xtfs->iptfs_settime = ktime_get_raw_fast_ns(); in iptfs_output_collect()
1907 trace_iptfs_timer_start(xtfs, xtfs->init_delay_ns); in iptfs_output_collect()
1910 spin_unlock_bh(&x->lock); in iptfs_output_collect()
1914 /* -------------------------- */
1916 /* -------------------------- */
1927 h->block_offset = htons(blkoff); in iptfs_output_prepare_skb()
1932 skb->transport_header = skb->network_header; in iptfs_output_prepare_skb()
1933 skb->network_header -= hsz; in iptfs_output_prepare_skb()
1935 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; in iptfs_output_prepare_skb()
1939 * iptfs_copy_create_frag() - create an inner fragment skb.
1951 struct sk_buff *src = st->root_skb; in iptfs_copy_create_frag()
1957 return ERR_PTR(-ENOMEM); in iptfs_copy_create_frag()
1970 * iptfs_copy_create_frags() - create and send N-1 fragments of a larger skb.
1990 u32 to_copy = skb->len - mtu; in iptfs_copy_create_frags()
1996 skb_prepare_seq_read(skb, 0, skb->len, &skbseq); in iptfs_copy_create_frags()
2000 to_copy = skb->len - offset; in iptfs_copy_create_frags()
2004 list_add_tail(&nskb->list, &sublist); in iptfs_copy_create_frags()
2006 /* FUTURE: if the packet has an odd/non-aligning length we could in iptfs_copy_create_frags()
2013 XFRM_INC_STATS(xs_net(xtfs->x), LINUX_MIB_XFRMOUTERROR); in iptfs_copy_create_frags()
2021 to_copy -= copy_len; in iptfs_copy_create_frags()
2051 * the rest -- no point in sending a packet that can't be reassembled. in iptfs_copy_create_frags()
2066 * iptfs_first_skb() - handle the first dequeued inner packet for output
2091 * don't fit in our max packet size. Otherwise we iptfs-fragment as in iptfs_first_skb()
2096 if (skb->ip_summed == CHECKSUM_PARTIAL) { in iptfs_first_skb()
2109 /* Simple case -- it fits. `mtu` accounted for all the overhead in iptfs_first_skb()
2112 if (skb->len <= mtu) { in iptfs_first_skb()
2129 *nextp = skb_shinfo(child)->frag_list; in iptfs_rehome_fraglist()
2131 fllen += (*nextp)->len; in iptfs_rehome_fraglist()
2132 nextp = &(*nextp)->next; in iptfs_rehome_fraglist()
2135 child->len -= fllen; in iptfs_rehome_fraglist()
2136 child->data_len -= fllen; in iptfs_rehome_fraglist()
2151 iptfs_skb_head_to_frag(from, &toi->frags[toi->nr_frags]); in iptfs_consume_frags()
2152 skb_frag_ref(to, toi->nr_frags++); in iptfs_consume_frags()
2157 memcpy(&toi->frags[toi->nr_frags], fromi->frags, in iptfs_consume_frags()
2158 sizeof(fromi->frags[0]) * fromi->nr_frags); in iptfs_consume_frags()
2159 toi->nr_frags += fromi->nr_frags; in iptfs_consume_frags()
2160 fromi->nr_frags = 0; in iptfs_consume_frags()
2161 from->data_len = 0; in iptfs_consume_frags()
2162 from->len = 0; in iptfs_consume_frags()
2163 to->truesize += from->truesize - new_truesize; in iptfs_consume_frags()
2164 from->truesize = new_truesize; in iptfs_consume_frags()
2172 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_output_queued()
2179 * consecutively (ESP seq-wise). Since this output function is always in iptfs_output_queued()
2191 skb->protocol = x->outer_mode.family == AF_INET ? htons(ETH_P_IP) : in iptfs_output_queued()
2194 if (skb->len > mtu && xtfs->cfg.dont_frag) { in iptfs_output_queued()
2218 * data is `mtu` - (skb->len - sizeof iptfs header). This is b/c in iptfs_output_queued()
2221 * skb->len, thus we subtract it from the skb length. in iptfs_output_queued()
2223 remaining = mtu - (skb->len - sizeof(struct ip_iptfs_hdr)); in iptfs_output_queued()
2225 /* Re-home (un-nest) nested fragment lists. We need to do this in iptfs_output_queued()
2230 nextp = &shi->frag_list; in iptfs_output_queued()
2233 nextp = iptfs_rehome_fraglist(&(*nextp)->next, *nextp); in iptfs_output_queued()
2235 nextp = &(*nextp)->next; in iptfs_output_queued()
2238 if (shi->frag_list || skb_cloned(skb) || skb_shared(skb)) in iptfs_output_queued()
2243 * NOTE: Maybe do not append if we will be mis-aligned, in iptfs_output_queued()
2244 * SW-based endpoints will probably have to copy in this in iptfs_output_queued()
2249 if (skb2->len > remaining) in iptfs_output_queued()
2260 if (skb2->ip_summed == CHECKSUM_PARTIAL) { in iptfs_output_queued()
2268 /* skb->pp_recycle is passed to __skb_frag_unref for all in iptfs_output_queued()
2274 (shi2->frag_list || in iptfs_output_queued()
2275 (!skb2->head_frag && skb_headlen(skb2)) || in iptfs_output_queued()
2276 skb->pp_recycle != skb2->pp_recycle || in iptfs_output_queued()
2278 (shi->nr_frags + shi2->nr_frags + 1 > MAX_SKB_FRAGS))) in iptfs_output_queued()
2282 skb->data_len += skb2->len; in iptfs_output_queued()
2283 skb->len += skb2->len; in iptfs_output_queued()
2284 remaining -= skb2->len; in iptfs_output_queued()
2293 nextp = &skb2->next; in iptfs_output_queued()
2297 skb->truesize += skb2->truesize; in iptfs_output_queued()
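
A worked example of the space accounting in the aggregation loop above, with invented byte counts and assuming the 4-byte basic AGGFRAG header (struct ip_iptfs_hdr):

  unsigned int mtu = 1400;               /* inner-data budget per outer packet */
  unsigned int skb_len = 396 + 4;        /* first inner packet + IPTFS header  */
  unsigned int remaining = mtu - (skb_len - 4);   /* = 1004                    */
  /* A 600-byte inner packet fits (600 <= 1004) and leaves remaining = 404;
   * an 800-byte packet would not be appended and instead starts the next
   * outer packet.
   */
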
2313 x = xtfs->x; in iptfs_delay_timer()
2323 spin_lock(&x->lock); in iptfs_delay_timer()
2325 skb_queue_splice_init(&xtfs->queue, &list); in iptfs_delay_timer()
2326 xtfs->queue_size = 0; in iptfs_delay_timer()
2327 settime = xtfs->iptfs_settime; in iptfs_delay_timer()
2328 spin_unlock(&x->lock); in iptfs_delay_timer()
2336 trace_iptfs_timer_expire(xtfs, (unsigned long long)(ktime_get_raw_fast_ns() - settime)); in iptfs_delay_timer()
2344 * iptfs_encap_add_ipv4() - add outer encaps
2349 * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
2364 skb_set_network_header(skb, -(x->props.header_len - x->props.enc_hdr_len)); in iptfs_encap_add_ipv4()
2365 skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); in iptfs_encap_add_ipv4()
2366 skb->transport_header = skb->network_header + sizeof(*top_iph); in iptfs_encap_add_ipv4()
2369 top_iph->ihl = 5; in iptfs_encap_add_ipv4()
2370 top_iph->version = 4; in iptfs_encap_add_ipv4()
2371 top_iph->protocol = IPPROTO_AGGFRAG; in iptfs_encap_add_ipv4()
2377 top_iph->tos = 0; in iptfs_encap_add_ipv4()
2379 top_iph->frag_off = htons(IP_DF); in iptfs_encap_add_ipv4()
2380 top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst)); in iptfs_encap_add_ipv4()
2381 top_iph->saddr = x->props.saddr.a4; in iptfs_encap_add_ipv4()
2382 top_iph->daddr = x->id.daddr.a4; in iptfs_encap_add_ipv4()
2383 ip_select_ident(dev_net(dst->dev), skb, NULL); in iptfs_encap_add_ipv4()
2390 * iptfs_encap_add_ipv6() - add outer encaps
2395 * copy is that IP-TFS/AGGFRAG can have different functionality for how to set
2411 skb_set_network_header(skb, -x->props.header_len + x->props.enc_hdr_len); in iptfs_encap_add_ipv6()
2412 skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr); in iptfs_encap_add_ipv6()
2413 skb->transport_header = skb->network_header + sizeof(*top_iph); in iptfs_encap_add_ipv6()
2416 top_iph->version = 6; in iptfs_encap_add_ipv6()
2417 top_iph->priority = 0; in iptfs_encap_add_ipv6()
2418 memset(top_iph->flow_lbl, 0, sizeof(top_iph->flow_lbl)); in iptfs_encap_add_ipv6()
2419 top_iph->nexthdr = IPPROTO_AGGFRAG; in iptfs_encap_add_ipv6()
2428 top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst)); in iptfs_encap_add_ipv6()
2429 top_iph->saddr = *(struct in6_addr *)&x->props.saddr; in iptfs_encap_add_ipv6()
2430 top_iph->daddr = *(struct in6_addr *)&x->id.daddr; in iptfs_encap_add_ipv6()
2437 * iptfs_prepare_output() - prepare the skb for output
2442 * - transport_header should point at ESP header
2443 * - network_header should point at Outer IP header
2444 * - mac_header should point at protocol/nexthdr of the outer IP
2448 if (x->outer_mode.family == AF_INET) in iptfs_prepare_output()
2450 if (x->outer_mode.family == AF_INET6) { in iptfs_prepare_output()
2454 return -EAFNOSUPPORT; in iptfs_prepare_output()
2457 return -EOPNOTSUPP; in iptfs_prepare_output()
2465 * __iptfs_get_inner_mtu() - return inner MTU with no fragmentation.
2476 aead = x->data; in __iptfs_get_inner_mtu()
2478 return ((outer_mtu - x->props.header_len - crypto_aead_authsize(aead)) & in __iptfs_get_inner_mtu()
2479 ~(blksize - 1)) - 2; in __iptfs_get_inner_mtu()
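
A worked example of the formula above, using invented but plausible values: outer_mtu = 1500; x->props.header_len = 40 (for example, outer IPv4 + ESP header/IV + the 4-byte IPTFS header); a 16-byte ICV and a 16-byte cipher block size:

  /*   1500 - 40 - 16   = 1444
   *   1444 & ~(16 - 1) = 1440   round down to the cipher block size
   *   1440 - 2         = 1438   leave room for the 2-byte ESP trailer
   *                             (pad length + next header)
   */
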
2483 * iptfs_get_inner_mtu() - return the inner MTU for an IPTFS xfrm.
2491 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_get_inner_mtu()
2493 /* If not dont-frag we have no MTU */ in iptfs_get_inner_mtu()
2494 if (!xtfs->cfg.dont_frag) in iptfs_get_inner_mtu()
2495 return x->outer_mode.family == AF_INET ? IP_MAX_MTU : IP6_MAX_MTU; in iptfs_get_inner_mtu()
2500 * iptfs_user_init() - initialize the SA with IPTFS options from netlink.
2512 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_user_init()
2516 xc = &xtfs->cfg; in iptfs_user_init()
2517 xc->max_queue_size = IPTFS_DEFAULT_MAX_QUEUE_SIZE; in iptfs_user_init()
2518 xc->reorder_win_size = IPTFS_DEFAULT_REORDER_WINDOW; in iptfs_user_init()
2519 xtfs->drop_time_ns = IPTFS_DEFAULT_DROP_TIME_USECS * NSECS_IN_USEC; in iptfs_user_init()
2520 xtfs->init_delay_ns = IPTFS_DEFAULT_INIT_DELAY_USECS * NSECS_IN_USEC; in iptfs_user_init()
2523 xc->dont_frag = true; in iptfs_user_init()
2525 xc->reorder_win_size = in iptfs_user_init()
2528 if (xc->reorder_win_size) { in iptfs_user_init()
2529 xtfs->w_saved = kcalloc(xc->reorder_win_size, in iptfs_user_init()
2530 sizeof(*xtfs->w_saved), GFP_KERNEL); in iptfs_user_init()
2531 if (!xtfs->w_saved) { in iptfs_user_init()
2533 return -ENOMEM; in iptfs_user_init()
2537 xc->pkt_size = nla_get_u32(attrs[XFRMA_IPTFS_PKT_SIZE]); in iptfs_user_init()
2538 if (!xc->pkt_size) { in iptfs_user_init()
2539 xtfs->payload_mtu = 0; in iptfs_user_init()
2540 } else if (xc->pkt_size > x->props.header_len) { in iptfs_user_init()
2541 xtfs->payload_mtu = xc->pkt_size - x->props.header_len; in iptfs_user_init()
2545 return -EINVAL; in iptfs_user_init()
2549 xc->max_queue_size = nla_get_u32(attrs[XFRMA_IPTFS_MAX_QSIZE]); in iptfs_user_init()
2551 xtfs->drop_time_ns = in iptfs_user_init()
2555 xtfs->init_delay_ns = in iptfs_user_init()
2558 q = (u64)xc->max_queue_size * 95; in iptfs_user_init()
2560 xtfs->ecn_queue_size = (u32)q; in iptfs_user_init()
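
Tying the netlink options together with assumed values (XFRMA_IPTFS_PKT_SIZE = 1400, x->props.header_len = 40, XFRMA_IPTFS_MAX_QSIZE = 1048576; the division by 100 appears to happen on the elided line between 2558 and 2560):

  /*   payload_mtu    = 1400 - 40          = 1360
   *   ecn_queue_size = 1048576 * 95 / 100 = 996147   95% soft limit used
   *                                                  for CE marking
   */
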
2567 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_sa_len()
2568 struct xfrm_iptfs_config *xc = &xtfs->cfg; in iptfs_sa_len()
2571 if (x->dir == XFRM_SA_DIR_IN) { in iptfs_sa_len()
2573 l += nla_total_size(sizeof(xc->reorder_win_size)); in iptfs_sa_len()
2575 if (xc->dont_frag) in iptfs_sa_len()
2576 l += nla_total_size(0); /* dont-frag flag */ in iptfs_sa_len()
2578 l += nla_total_size(sizeof(xc->max_queue_size)); in iptfs_sa_len()
2579 l += nla_total_size(sizeof(xc->pkt_size)); in iptfs_sa_len()
2587 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_copy_to_user()
2588 struct xfrm_iptfs_config *xc = &xtfs->cfg; in iptfs_copy_to_user()
2592 if (x->dir == XFRM_SA_DIR_IN) { in iptfs_copy_to_user()
2593 q = xtfs->drop_time_ns; in iptfs_copy_to_user()
2600 xc->reorder_win_size); in iptfs_copy_to_user()
2602 if (xc->dont_frag) { in iptfs_copy_to_user()
2608 q = xtfs->init_delay_ns; in iptfs_copy_to_user()
2614 ret = nla_put_u32(skb, XFRMA_IPTFS_MAX_QSIZE, xc->max_queue_size); in iptfs_copy_to_user()
2618 ret = nla_put_u32(skb, XFRMA_IPTFS_PKT_SIZE, xc->pkt_size); in iptfs_copy_to_user()
2627 __skb_queue_head_init(&xtfs->queue); in __iptfs_init_state()
2628 hrtimer_init(&xtfs->iptfs_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE); in __iptfs_init_state()
2629 xtfs->iptfs_timer.function = iptfs_delay_timer; in __iptfs_init_state()
2631 spin_lock_init(&xtfs->drop_lock); in __iptfs_init_state()
2632 hrtimer_init(&xtfs->drop_timer, CLOCK_MONOTONIC, IPTFS_HRTIMER_MODE); in __iptfs_init_state()
2633 xtfs->drop_timer.function = iptfs_drop_timer; in __iptfs_init_state()
2637 if (x->props.family == AF_INET) in __iptfs_init_state()
2638 x->props.header_len += sizeof(struct iphdr) + sizeof(struct ip_iptfs_hdr); in __iptfs_init_state()
2639 else if (x->props.family == AF_INET6) in __iptfs_init_state()
2640 x->props.header_len += sizeof(struct ipv6hdr) + sizeof(struct ip_iptfs_hdr); in __iptfs_init_state()
2641 x->props.enc_hdr_len = sizeof(struct ip_iptfs_hdr); in __iptfs_init_state()
2643 /* Always keep a module reference when x->mode_data is set */ in __iptfs_init_state()
2644 __module_get(x->mode_cbs->owner); in __iptfs_init_state()
2646 x->mode_data = xtfs; in __iptfs_init_state()
2647 xtfs->x = x; in __iptfs_init_state()
2654 xtfs = kmemdup(orig->mode_data, sizeof(*xtfs), GFP_KERNEL); in iptfs_clone_state()
2656 return -ENOMEM; in iptfs_clone_state()
2658 x->mode_data = xtfs; in iptfs_clone_state()
2659 xtfs->x = x; in iptfs_clone_state()
2661 xtfs->ra_newskb = NULL; in iptfs_clone_state()
2662 if (xtfs->cfg.reorder_win_size) { in iptfs_clone_state()
2663 xtfs->w_saved = kcalloc(xtfs->cfg.reorder_win_size, in iptfs_clone_state()
2664 sizeof(*xtfs->w_saved), GFP_KERNEL); in iptfs_clone_state()
2665 if (!xtfs->w_saved) { in iptfs_clone_state()
2667 return -ENOMEM; in iptfs_clone_state()
2678 if (x->mode_data) { in iptfs_init_state()
2680 xtfs = x->mode_data; in iptfs_init_state()
2684 return -ENOMEM; in iptfs_init_state()
2694 struct xfrm_iptfs_data *xtfs = x->mode_data; in iptfs_destroy_state()
2702 spin_lock_bh(&xtfs->x->lock); in iptfs_destroy_state()
2703 hrtimer_cancel(&xtfs->iptfs_timer); in iptfs_destroy_state()
2705 skb_queue_splice_init(&xtfs->queue, &list); in iptfs_destroy_state()
2706 spin_unlock_bh(&xtfs->x->lock); in iptfs_destroy_state()
2711 spin_lock_bh(&xtfs->drop_lock); in iptfs_destroy_state()
2712 hrtimer_cancel(&xtfs->drop_timer); in iptfs_destroy_state()
2713 spin_unlock_bh(&xtfs->drop_lock); in iptfs_destroy_state()
2715 if (xtfs->ra_newskb) in iptfs_destroy_state()
2716 kfree_skb(xtfs->ra_newskb); in iptfs_destroy_state()
2718 for (s = xtfs->w_saved, se = s + xtfs->w_savedlen; s < se; s++) { in iptfs_destroy_state()
2719 if (s->skb) in iptfs_destroy_state()
2720 kfree_skb(s->skb); in iptfs_destroy_state()
2723 kfree_sensitive(xtfs->w_saved); in iptfs_destroy_state()
2726 module_put(x->mode_cbs->owner); in iptfs_destroy_state()
2747 pr_info("xfrm_iptfs: IPsec IP-TFS tunnel mode module\n"); in xfrm_iptfs_init()
2751 pr_info("%s: can't register IP-TFS\n", __func__); in xfrm_iptfs_init()
2764 MODULE_DESCRIPTION("IP-TFS support for xfrm ipsec tunnels");