// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;

	switch (ctt->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ft->src_port = ctt->src.u.tcp.port;
		ft->dst_port = ctt->dst.u.tcp.port;
		break;
	}
}

struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		return NULL;

	refcount_inc(&ct->ct_general.use);
	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
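
/* Note: the usual life cycle, as driven by a frontend such as the nft
 * flow offload expression, is roughly (illustrative sketch only, not a
 * hard API contract):
 *
 *	flow = flow_offload_alloc(ct);		-> takes a ct reference
 *	flow_offload_route_init(flow, &route);	-> consumes the route dsts
 *	flow_offload_add(flowtable, flow);	-> inserts both directions
 *
 * On failure after flow_offload_alloc(), the caller is expected to drop
 * the entry with flow_offload_free(), which puts the ct reference.
 */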

static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	if (flow_tuple->l3proto == NFPROTO_IPV6)
		return rt6_get_cookie(dst_rt6_info(flow_tuple->dst_cache));

	return 0;
}

static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route,
					     enum flow_offload_tuple_dir dir)
{
	struct dst_entry *dst = route->tuple[dir].dst;

	route->tuple[dir].dst = NULL;

	return dst;
}

static int flow_offload_fill_route(struct flow_offload *flow,
				   struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = nft_route_dst_fetch(route, dir);
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		dst_release(dst);
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

void flow_offload_route_init(struct flow_offload *flow,
			     struct nf_flow_route *route)
{
	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	flow->type = NF_FLOW_OFFLOAD_ROUTE;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
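
/* dst ownership note: flow_offload_fill_route() detaches the dst from the
 * route via nft_route_dst_fetch(). For FLOW_OFFLOAD_XMIT_DIRECT the dst is
 * released right away; for the NEIGH/XFRM xmit types it is cached in the
 * tuple and released later from nft_flow_dst_release().
 */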

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_fixup_tcp(struct nf_conn *ct, u8 tcp_state)
{
	struct ip_ct_tcp *tcp = &ct->proto.tcp;

	spin_lock_bh(&ct->lock);
	if (tcp->state != tcp_state)
		tcp->state = tcp_state;

	/* syn packet triggers the TCP reopen case from conntrack. */
	if (tcp->state == TCP_CONNTRACK_CLOSE)
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;

	/* Conntrack state is outdated due to offload bypass.
	 * Clear IP_CT_TCP_FLAG_MAXACK_SET, otherwise conntrack's
	 * TCP reset validation will fail.
	 */
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[0].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET;
	tcp->seen[1].td_maxwin = 0;
	tcp->seen[1].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET;
	spin_unlock_bh(&ct->lock);
}

static void flow_offload_fixup_ct(struct flow_offload *flow)
{
	struct nf_conn *ct = flow->ct;
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	bool expired, closing = false;
	u32 offload_timeout = 0;
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		const struct nf_tcp_net *tn = nf_tcp_pernet(net);
		u8 tcp_state;

		/* Enter CLOSE state if fin/rst packet has been seen, this
		 * allows TCP reopen from conntrack. Otherwise, pick up from
		 * the last seen TCP state.
		 */
		closing = test_bit(NF_FLOW_CLOSING, &flow->flags);
		if (closing) {
			flow_offload_fixup_tcp(ct, TCP_CONNTRACK_CLOSE);
			timeout = READ_ONCE(tn->timeouts[TCP_CONNTRACK_CLOSE]);
			expired = false;
		} else {
			tcp_state = READ_ONCE(ct->proto.tcp.state);
			flow_offload_fixup_tcp(ct, tcp_state);
			timeout = READ_ONCE(tn->timeouts[tcp_state]);
			expired = nf_flow_has_expired(flow);
		}
		offload_timeout = READ_ONCE(tn->offload_timeout);

	} else if (l4num == IPPROTO_UDP) {
		const struct nf_udp_net *tn = nf_udp_pernet(net);
		enum udp_conntrack state =
			test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			UDP_CT_REPLIED : UDP_CT_UNREPLIED;

		timeout = READ_ONCE(tn->timeouts[state]);
		expired = nf_flow_has_expired(flow);
		offload_timeout = READ_ONCE(tn->offload_timeout);
	} else {
		return;
	}

	if (expired)
		timeout -= offload_timeout;

	if (timeout < 0)
		timeout = 0;

	if (closing ||
	    nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		nf_ct_refresh(ct, timeout);
}
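
/* flow_offload_fixup_ct() hands the connection back to classic conntrack:
 * it resynchronizes the TCP state (see flow_offload_fixup_tcp()) and picks
 * a conntrack timeout consistent with that state, clamping it so that an
 * already expired flow does not artificially extend the ct lifetime.
 */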

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};
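
/* The rhashtable is keyed on the leading fields of struct
 * flow_offload_tuple, i.e. everything up to the __hash marker. Both the
 * original and the reply tuple of a flow are inserted, so a lookup with
 * either direction's addresses and ports finds the same flow entry.
 */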

unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_refresh(flow->ct, NF_CT_DAY);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);
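
/* flow_offload_add() also bumps the ct timeout to NF_CT_DAY so that the
 * conntrack entry is not reaped while packets bypass nf_conntrack_in();
 * the gc worker later extends the ct timeout again via
 * nf_flow_table_extend_ct_timeout() for as long as the flow stays offloaded.
 */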

void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow, bool force)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (force || timeout - READ_ONCE(flow->timeout) > HZ)
		WRITE_ONCE(flow->timeout, timeout);
	else
		return;

	if (likely(!nf_flowtable_hw_offload(flow_table)) ||
	    test_bit(NF_FLOW_CLOSING, &flow->flags))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
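
/* The timeout refresh is rate limited: unless @force is set, the timestamp
 * is only rewritten when it would move forward by more than HZ, which
 * avoids rewriting flow->timeout for every single offloaded packet.
 */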

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);
	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
	if (!test_and_set_bit(NF_FLOW_TEARDOWN, &flow->flags))
		flow_offload_fixup_ct(flow);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
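
/* Lookup returns the matching tuplehash entry; callers recover the flow
 * with container_of() on tuplehash[tuple.dir], the same way it is done
 * above. Entries already marked for teardown or backed by a dying ct are
 * treated as misses.
 */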

static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct nf_flowtable *flowtable,
				   struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow_table, flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
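
/* The walk above only visits the ORIGINAL-direction tuplehash (dir == 0),
 * so each flow is passed to the iterator exactly once even though both
 * directions are present in the rhashtable.
 */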

static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
			      const struct flow_offload *flow)
{
	return flow_table->type->gc && flow_table->type->gc(flow);
}

/**
 * nf_flow_table_tcp_timeout() - new timeout of offloaded tcp entry
 * @ct: Flowtable offloaded tcp ct
 *
 * Return: number of jiffies until the ct entry should expire.
 */
static u32 nf_flow_table_tcp_timeout(const struct nf_conn *ct)
{
	u8 state = READ_ONCE(ct->proto.tcp.state);

	switch (state) {
	case TCP_CONNTRACK_SYN_SENT:
	case TCP_CONNTRACK_SYN_RECV:
		return 0;
	case TCP_CONNTRACK_ESTABLISHED:
		return NF_CT_DAY;
	case TCP_CONNTRACK_FIN_WAIT:
	case TCP_CONNTRACK_CLOSE_WAIT:
	case TCP_CONNTRACK_LAST_ACK:
	case TCP_CONNTRACK_TIME_WAIT:
		return 5 * 60 * HZ;
	case TCP_CONNTRACK_CLOSE:
		return 0;
	}

	return 0;
}

/**
 * nf_flow_table_extend_ct_timeout() - Extend ct timeout of offloaded conntrack entry
 * @ct: Flowtable offloaded ct
 *
 * Datapath lookups in the conntrack table will evict nf_conn entries
 * if they have expired.
 *
 * Once nf_conn entries have been offloaded, nf_conntrack might not see any
 * packets anymore. Thus ct->timeout is no longer refreshed and ct can
 * be evicted.
 *
 * To avoid the need for an additional check on the offload bit for every
 * packet processed via nf_conntrack_in(), set an arbitrary timeout large
 * enough not to ever expire; this saves us a check for the IPS_OFFLOAD_BIT
 * from the packet path via nf_ct_is_expired().
 */
static void nf_flow_table_extend_ct_timeout(struct nf_conn *ct)
{
	static const u32 min_timeout = 5 * 60 * HZ;
	u32 expires = nf_ct_expires(ct);

	/* normal case: large enough timeout, nothing to do. */
	if (likely(expires >= min_timeout))
		return;

	/* must check offload bit after this, we do not hold any locks.
	 * flowtable and ct entries could have been removed on another CPU.
	 */
	if (!refcount_inc_not_zero(&ct->ct_general.use))
		return;

	/* load ct->status after refcount increase */
	smp_acquire__after_ctrl_dep();

	if (nf_ct_is_confirmed(ct) &&
	    test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
		u8 l4proto = nf_ct_protonum(ct);
		u32 new_timeout = true;

		switch (l4proto) {
		case IPPROTO_UDP:
			new_timeout = NF_CT_DAY;
			break;
		case IPPROTO_TCP:
			new_timeout = nf_flow_table_tcp_timeout(ct);
			break;
		default:
			WARN_ON_ONCE(1);
			break;
		}

		/* Update to ct->timeout from nf_conntrack happens
		 * without holding ct->lock.
		 *
		 * Use cmpxchg to ensure timeout extension doesn't
		 * happen when we race with conntrack datapath.
		 *
		 * The inverse -- datapath updating ->timeout right
		 * after this -- is fine, datapath is authoritative.
		 */
		if (new_timeout) {
			new_timeout += nfct_time_stamp;
			cmpxchg(&ct->timeout, expires, new_timeout);
		}
	}

	nf_ct_put(ct);
}

static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
				    struct flow_offload *flow, void *data)
{
	bool teardown = test_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_custom_gc(flow_table, flow)) {
		flow_offload_teardown(flow);
		teardown = true;
	} else if (!teardown) {
		nf_flow_table_extend_ct_timeout(flow->ct);
	}

	if (teardown) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_CLOSING, &flow->flags) &&
		   test_bit(NF_FLOW_HW, &flow->flags) &&
		   !test_bit(NF_FLOW_HW_DYING, &flow->flags)) {
		nf_flow_offload_del(flow_table, flow);
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}
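
/* gc step summary: expired, dying or custom-gc'd flows are torn down; for
 * hardware-offloaded flows the teardown waits for the hardware path to
 * transition through NF_FLOW_HW_DYING and NF_FLOW_HW_DEAD before the
 * software entry is removed. Flows that are not being torn down get their
 * ct timeout extended and, if offloaded to hardware, their stats synced.
 */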

void nf_flow_table_gc_run(struct nf_flowtable *flow_table)
{
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_gc_run(flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
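
/* Both port rewrite helpers overlay struct flow_ports (source/dest, the
 * first four bytes shared by the TCP and UDP headers) on the transport
 * header and then fix up the L4 checksum via nf_flow_nat_port().
 */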

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
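
/* A freshly initialized flowtable is linked onto the global 'flowtables'
 * list (used by nf_flow_table_cleanup() on netdevice events) and its gc
 * work is scheduled to run roughly once per second (every HZ jiffies).
 */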

static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table,
				     struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_offload_flush(flow_table);
	/* ... no more pending work after this stage ... */
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_gc_run(flow_table);
	nf_flow_table_offload_flush_cleanup(flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
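
/* Teardown ordering in nf_flow_table_free(): unlink from the global list,
 * stop the gc worker, flush pending hardware offload work, mark every
 * remaining flow for teardown, run one final gc pass and only then destroy
 * the rhashtable.
 */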

static int nf_flow_table_init_net(struct net *net)
{
	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	return net->ft.stat ? 0 : -ENOMEM;
}

static void nf_flow_table_fini_net(struct net *net)
{
	free_percpu(net->ft.stat);
}

static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	ret = nf_flow_table_init_net(net);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		goto out_proc;

	return 0;

out_proc:
	nf_flow_table_fini_net(net);
	return ret;
}

static void nf_flow_table_pernet_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		nf_flow_table_fini_proc(net);
		nf_flow_table_fini_net(net);
	}
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit_batch = nf_flow_table_pernet_exit,
};

static int __init nf_flow_table_module_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_flow_table_net_ops);
	if (ret < 0)
		return ret;

	ret = nf_flow_table_offload_init();
	if (ret)
		goto out_offload;

	ret = nf_flow_register_bpf();
	if (ret)
		goto out_bpf;

	return 0;

out_bpf:
	nf_flow_table_offload_exit();
out_offload:
	unregister_pernet_subsys(&nf_flow_table_net_ops);
	return ret;
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
	unregister_pernet_subsys(&nf_flow_table_net_ops);
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>");
MODULE_DESCRIPTION("Netfilter flow table module");