// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <[email protected]>
 *
 * Changes:
 *
 * Eduardo J. Blanco <[email protected]> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
 * the action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
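
/* Illustrative sketch, not kernel API: how the two views of the union line
 * up on a little-endian build. With miss_cookie_base = 5 and act_index = 2:
 *
 *	union tcf_exts_miss_cookie mc = { .miss_cookie_base = 5,
 *					  .act_index = 2, };
 *	// mc.miss_cookie == ((u64)2 << 32) | 5
 *
 * tcf_exts_miss_cookie_get() below performs this packing and
 * tcf_exts_miss_cookie_lookup() undoes it to recover the node and the
 * action index.
 */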

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err < 0)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
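
/* Usage sketch (hedged; the callers live outside this file): code that
 * starts delivering tc metadata via the skb extension is expected to
 * bracket that period with the static-key helpers, so the extension lookup
 * stays off the fast path when unused:
 *
 *	tc_skb_ext_tc_enable();		// when the feature is set up
 *	...
 *	tc_skb_ext_tc_disable();	// when it is torn down
 */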

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
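
/* Caller-side sketch of the -EAGAIN contract above (hypothetical caller):
 * since RTNL was dropped for request_module(), a rule-update path replays
 * the whole request instead of continuing with possibly stale state:
 *
 *	ops = tcf_proto_lookup_ops(name, true, extack);
 *	if (PTR_ERR(ops) == -EAGAIN)
 *		return -EAGAIN;	// rtnetlink core restarts the message
 */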

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
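
/* Usage sketch (hypothetical classifier, names illustrative): defer freeing
 * of a filter until an RCU grace period has elapsed and the work item has
 * run on tc_filter_wq:
 *
 *	static void my_filter_free_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	tcf_queue_work(&f->rwork, my_filter_free_work);
 */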

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
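
/* Worked example: tp->prio keeps the 16-bit user priority in the major part
 * of the handle (prio << 16). With no filters installed, the first auto prio
 * is TC_H_MAKE(0xC0000000U, 0U), i.e. user prio 0xC000. If the head filter
 * has tp->prio == 0xC0000000, then TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000,
 * i.e. user prio 0xBFFF, one below the current head.
 */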

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	tp->usesw = !tp->ops->reoffload;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_block *block = tp->chain->block;
	bool counted = false;

	if (!add) {
		if (tp->usesw && tp->counted) {
			if (!atomic_dec_return(&block->useswcnt))
				static_branch_dec(&tcf_sw_enabled_key);
			tp->counted = false;
		}
		return;
	}

	spin_lock(&tp->lock);
	if (tp->usesw && !tp->counted) {
		counted = true;
		tp->counted = true;
	}
	spin_unlock(&tp->lock);

	if (counted && atomic_inc_return(&block->useswcnt) == 1)
		static_branch_inc(&tcf_sw_enabled_key);
#endif
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	tcf_proto_count_usesw(tp, false);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
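
/* Usage sketch: the canonical iteration pattern, as used by the flush and
 * dump paths in this file. Passing the previous chain back in releases its
 * reference, so a plain loop neither leaks nor double-puts:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// use chain; a reference is held across the body
 *	}
 */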

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
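
/* Usage sketch, mirroring the chain iterator above: each call puts the
 * previous tp and returns the next one with its own reference held:
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp)) {
 *		// use tp
 *	}
 */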

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference to block by
		 * calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
	tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
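
/* Usage sketch (hypothetical classful qdisc; q->block and q->filter_list
 * are illustrative fields): a qdisc typically obtains its block in ->init()
 * and releases it in ->destroy():
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);	// from ->destroy()
 *
 * chain0 head changes are then propagated into q->filter_list through
 * tcf_chain_head_change_dflt() above.
 */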
1546 
1547 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1548  * actions should be all removed after flushing.
1549  */
tcf_block_put_ext(struct tcf_block * block,struct Qdisc * q,struct tcf_block_ext_info * ei)1550 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1551 		       struct tcf_block_ext_info *ei)
1552 {
1553 	struct net_device *dev = qdisc_dev(q);
1554 
1555 	if (!block)
1556 		return;
1557 	if (tcf_block_tracks_dev(block, ei))
1558 		xa_erase(&block->ports, dev->ifindex);
1559 	tcf_chain0_head_change_cb_del(block, ei);
1560 	tcf_block_owner_del(block, q, ei->binder_type);
1561 
1562 	__tcf_block_put(block, q, ei, true);
1563 }
1564 EXPORT_SYMBOL(tcf_block_put_ext);
1565 
tcf_block_put(struct tcf_block * block)1566 void tcf_block_put(struct tcf_block *block)
1567 {
1568 	struct tcf_block_ext_info ei = {0, };
1569 
1570 	if (!block)
1571 		return;
1572 	tcf_block_put_ext(block, block->q, &ei);
1573 }
1574 
1575 EXPORT_SYMBOL(tcf_block_put);
1576 
1577 static int
tcf_block_playback_offloads(struct tcf_block * block,flow_setup_cb_t * cb,void * cb_priv,bool add,bool offload_in_use,struct netlink_ext_ack * extack)1578 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1579 			    void *cb_priv, bool add, bool offload_in_use,
1580 			    struct netlink_ext_ack *extack)
1581 {
1582 	struct tcf_chain *chain, *chain_prev;
1583 	struct tcf_proto *tp, *tp_prev;
1584 	int err;
1585 
1586 	lockdep_assert_held(&block->cb_lock);
1587 
1588 	for (chain = __tcf_get_next_chain(block, NULL);
1589 	     chain;
1590 	     chain_prev = chain,
1591 		     chain = __tcf_get_next_chain(block, chain),
1592 		     tcf_chain_put(chain_prev)) {
1593 		if (chain->tmplt_ops && add)
1594 			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
1595 							  cb_priv);
1596 		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1597 		     tp_prev = tp,
1598 			     tp = __tcf_get_next_proto(chain, tp),
1599 			     tcf_proto_put(tp_prev, true, NULL)) {
1600 			if (tp->ops->reoffload) {
1601 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1602 							 extack);
1603 				if (err && add)
1604 					goto err_playback_remove;
1605 			} else if (add && offload_in_use) {
1606 				err = -EOPNOTSUPP;
1607 				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1608 				goto err_playback_remove;
1609 			}
1610 		}
1611 		if (chain->tmplt_ops && !add)
1612 			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
1613 							  cb_priv);
1614 	}
1615 
1616 	return 0;
1617 
1618 err_playback_remove:
1619 	tcf_proto_put(tp, true, NULL);
1620 	tcf_chain_put(chain);
1621 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1622 				    extack);
1623 	return err;
1624 }
1625 
tcf_block_bind(struct tcf_block * block,struct flow_block_offload * bo)1626 static int tcf_block_bind(struct tcf_block *block,
1627 			  struct flow_block_offload *bo)
1628 {
1629 	struct flow_block_cb *block_cb, *next;
1630 	int err, i = 0;
1631 
1632 	lockdep_assert_held(&block->cb_lock);
1633 
1634 	list_for_each_entry(block_cb, &bo->cb_list, list) {
1635 		err = tcf_block_playback_offloads(block, block_cb->cb,
1636 						  block_cb->cb_priv, true,
1637 						  tcf_block_offload_in_use(block),
1638 						  bo->extack);
1639 		if (err)
1640 			goto err_unroll;
1641 		if (!bo->unlocked_driver_cb)
1642 			block->lockeddevcnt++;
1643 
1644 		i++;
1645 	}
1646 	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1647 
1648 	return 0;
1649 
1650 err_unroll:
1651 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1652 		list_del(&block_cb->driver_list);
1653 		if (i-- > 0) {
1654 			list_del(&block_cb->list);
1655 			tcf_block_playback_offloads(block, block_cb->cb,
1656 						    block_cb->cb_priv, false,
1657 						    tcf_block_offload_in_use(block),
1658 						    NULL);
1659 			if (!bo->unlocked_driver_cb)
1660 				block->lockeddevcnt--;
1661 		}
1662 		flow_block_cb_free(block_cb);
1663 	}
1664 
1665 	return err;
1666 }
1667 
tcf_block_unbind(struct tcf_block * block,struct flow_block_offload * bo)1668 static void tcf_block_unbind(struct tcf_block *block,
1669 			     struct flow_block_offload *bo)
1670 {
1671 	struct flow_block_cb *block_cb, *next;
1672 
1673 	lockdep_assert_held(&block->cb_lock);
1674 
1675 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1676 		tcf_block_playback_offloads(block, block_cb->cb,
1677 					    block_cb->cb_priv, false,
1678 					    tcf_block_offload_in_use(block),
1679 					    NULL);
1680 		list_del(&block_cb->list);
1681 		flow_block_cb_free(block_cb);
1682 		if (!bo->unlocked_driver_cb)
1683 			block->lockeddevcnt--;
1684 	}
1685 }
1686 
tcf_block_setup(struct tcf_block * block,struct flow_block_offload * bo)1687 static int tcf_block_setup(struct tcf_block *block,
1688 			   struct flow_block_offload *bo)
1689 {
1690 	int err;
1691 
1692 	switch (bo->command) {
1693 	case FLOW_BLOCK_BIND:
1694 		err = tcf_block_bind(block, bo);
1695 		break;
1696 	case FLOW_BLOCK_UNBIND:
1697 		err = 0;
1698 		tcf_block_unbind(block, bo);
1699 		break;
1700 	default:
1701 		WARN_ON_ONCE(1);
1702 		err = -EOPNOTSUPP;
1703 	}
1704 
1705 	return err;
1706 }
1707 
1708 /* Main classifier routine: scans classifier chain attached
1709  * to this qdisc, (optionally) tests for protocol and asks
1710  * specific classifiers.
1711  */
__tcf_classify(struct sk_buff * skb,const struct tcf_proto * tp,const struct tcf_proto * orig_tp,struct tcf_result * res,bool compat_mode,struct tcf_exts_miss_cookie_node * n,int act_index,u32 * last_executed_chain)1712 static inline int __tcf_classify(struct sk_buff *skb,
1713 				 const struct tcf_proto *tp,
1714 				 const struct tcf_proto *orig_tp,
1715 				 struct tcf_result *res,
1716 				 bool compat_mode,
1717 				 struct tcf_exts_miss_cookie_node *n,
1718 				 int act_index,
1719 				 u32 *last_executed_chain)
1720 {
1721 #ifdef CONFIG_NET_CLS_ACT
1722 	const int max_reclassify_loop = 16;
1723 	const struct tcf_proto *first_tp;
1724 	int limit = 0;
1725 
1726 reclassify:
1727 #endif
1728 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1729 		__be16 protocol = skb_protocol(skb, false);
1730 		int err = 0;
1731 
1732 		if (n) {
1733 			struct tcf_exts *exts;
1734 
1735 			if (n->tp_prio != tp->prio)
1736 				continue;
1737 
1738 			/* We re-lookup the tp and chain based on index instead
1739 			 * of having hard refs and locks to them, so sanity-check
1740 			 * whether any of tp, chain or exts was replaced by the
1741 			 * time we got here with a cookie from hardware.
1742 			 */
1743 			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1744 				     !tp->ops->get_exts)) {
1745 				tcf_set_drop_reason(skb,
1746 						    SKB_DROP_REASON_TC_COOKIE_ERROR);
1747 				return TC_ACT_SHOT;
1748 			}
1749 
1750 			exts = tp->ops->get_exts(tp, n->handle);
1751 			if (unlikely(!exts || n->exts != exts)) {
1752 				tcf_set_drop_reason(skb,
1753 						    SKB_DROP_REASON_TC_COOKIE_ERROR);
1754 				return TC_ACT_SHOT;
1755 			}
1756 
1757 			n = NULL;
1758 			err = tcf_exts_exec_ex(skb, exts, act_index, res);
1759 		} else {
1760 			if (tp->protocol != protocol &&
1761 			    tp->protocol != htons(ETH_P_ALL))
1762 				continue;
1763 
1764 			err = tc_classify(skb, tp, res);
1765 		}
1766 #ifdef CONFIG_NET_CLS_ACT
1767 		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1768 			first_tp = orig_tp;
1769 			*last_executed_chain = first_tp->chain->index;
1770 			goto reset;
1771 		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1772 			first_tp = res->goto_tp;
1773 			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1774 			goto reset;
1775 		}
1776 #endif
1777 		if (err >= 0)
1778 			return err;
1779 	}
1780 
1781 	if (unlikely(n)) {
1782 		tcf_set_drop_reason(skb,
1783 				    SKB_DROP_REASON_TC_COOKIE_ERROR);
1784 		return TC_ACT_SHOT;
1785 	}
1786 
1787 	return TC_ACT_UNSPEC; /* signal: continue lookup */
1788 #ifdef CONFIG_NET_CLS_ACT
1789 reset:
1790 	if (unlikely(limit++ >= max_reclassify_loop)) {
1791 		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1792 				       tp->chain->block->index,
1793 				       tp->prio & 0xffff,
1794 				       ntohs(tp->protocol));
1795 		tcf_set_drop_reason(skb,
1796 				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
1797 		return TC_ACT_SHOT;
1798 	}
1799 
1800 	tp = first_tp;
1801 	goto reclassify;
1802 #endif
1803 }
1804 
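/* A rough sketch of how the qdisc fast paths invoke this (the real
 * callers live in net/core/dev.c and the classful qdiscs):
 *
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, miniq->block, miniq->filter_list,
 *			     &res, false)) {
 *	case TC_ACT_SHOT:
 *		// drop skb, using the reason set by the classifier
 *		...
 *	}
 *
 * miniq here stands for the per-qdisc mini-Qdisc snapshot of the block
 * and filter list.
 */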
1805 int tcf_classify(struct sk_buff *skb,
1806 		 const struct tcf_block *block,
1807 		 const struct tcf_proto *tp,
1808 		 struct tcf_result *res, bool compat_mode)
1809 {
1810 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1811 	u32 last_executed_chain = 0;
1812 
1813 	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1814 			      &last_executed_chain);
1815 #else
1816 	u32 last_executed_chain = tp ? tp->chain->index : 0;
1817 	struct tcf_exts_miss_cookie_node *n = NULL;
1818 	const struct tcf_proto *orig_tp = tp;
1819 	struct tc_skb_ext *ext;
1820 	int act_index = 0;
1821 	int ret;
1822 
1823 	if (block) {
1824 		ext = skb_ext_find(skb, TC_SKB_EXT);
1825 
1826 		if (ext && (ext->chain || ext->act_miss)) {
1827 			struct tcf_chain *fchain;
1828 			u32 chain;
1829 
1830 			if (ext->act_miss) {
1831 				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1832 								&act_index);
1833 				if (!n) {
1834 					tcf_set_drop_reason(skb,
1835 							    SKB_DROP_REASON_TC_COOKIE_ERROR);
1836 					return TC_ACT_SHOT;
1837 				}
1838 
1839 				chain = n->chain_index;
1840 			} else {
1841 				chain = ext->chain;
1842 			}
1843 
1844 			fchain = tcf_chain_lookup_rcu(block, chain);
1845 			if (!fchain) {
1846 				tcf_set_drop_reason(skb,
1847 						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1848 
1849 				return TC_ACT_SHOT;
1850 			}
1851 
1852 			/* Consume, so cloned/redirect skbs won't inherit ext */
1853 			skb_ext_del(skb, TC_SKB_EXT);
1854 
1855 			tp = rcu_dereference_bh(fchain->filter_chain);
1856 			last_executed_chain = fchain->index;
1857 		}
1858 	}
1859 
1860 	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1861 			     &last_executed_chain);
1862 
1863 	if (tc_skb_ext_tc_enabled()) {
1864 		/* If we missed on some chain */
1865 		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1866 			struct tc_skb_cb *cb = tc_skb_cb(skb);
1867 
1868 			ext = tc_skb_ext_alloc(skb);
1869 			if (WARN_ON_ONCE(!ext)) {
1870 				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
1871 				return TC_ACT_SHOT;
1872 			}
1873 			ext->chain = last_executed_chain;
1874 			ext->mru = cb->mru;
1875 			ext->post_ct = cb->post_ct;
1876 			ext->post_ct_snat = cb->post_ct_snat;
1877 			ext->post_ct_dnat = cb->post_ct_dnat;
1878 			ext->zone = cb->zone;
1879 		}
1880 	}
1881 
1882 	return ret;
1883 #endif
1884 }
1885 EXPORT_SYMBOL(tcf_classify);
1886 
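/* Cursor into a chain's singly linked filter list: pprev points at the
 * link to rewrite on insert/remove, next caches the element following
 * the proto found by tcf_chain_tp_find().
 */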
1887 struct tcf_chain_info {
1888 	struct tcf_proto __rcu **pprev;
1889 	struct tcf_proto __rcu *next;
1890 };
1891 
1892 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1893 					   struct tcf_chain_info *chain_info)
1894 {
1895 	return tcf_chain_dereference(*chain_info->pprev, chain);
1896 }
1897 
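/* The insert/remove helpers below edit the filter list in place and
 * expect the caller to hold chain->filter_chain_lock; see
 * tcf_chain_tp_insert_unique() and tcf_chain_tp_delete_empty() for the
 * locked call sites.
 */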
1898 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1899 			       struct tcf_chain_info *chain_info,
1900 			       struct tcf_proto *tp)
1901 {
1902 	if (chain->flushing)
1903 		return -EAGAIN;
1904 
1905 	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1906 	if (*chain_info->pprev == chain->filter_chain)
1907 		tcf_chain0_head_change(chain, tp);
1908 	tcf_proto_get(tp);
1909 	rcu_assign_pointer(*chain_info->pprev, tp);
1910 
1911 	return 0;
1912 }
1913 
1914 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1915 				struct tcf_chain_info *chain_info,
1916 				struct tcf_proto *tp)
1917 {
1918 	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1919 
1920 	tcf_proto_mark_delete(tp);
1921 	if (tp == chain->filter_chain)
1922 		tcf_chain0_head_change(chain, next);
1923 	RCU_INIT_POINTER(*chain_info->pprev, next);
1924 }
1925 
1926 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1927 					   struct tcf_chain_info *chain_info,
1928 					   u32 protocol, u32 prio,
1929 					   bool prio_allocate,
1930 					   struct netlink_ext_ack *extack);
1931 
1932 /* Try to insert the new proto.
1933  * If a proto with the specified priority already exists, free the new
1934  * proto and return the existing one.
1935  */
1936 
1937 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1938 						    struct tcf_proto *tp_new,
1939 						    u32 protocol, u32 prio,
1940 						    bool rtnl_held)
1941 {
1942 	struct tcf_chain_info chain_info;
1943 	struct tcf_proto *tp;
1944 	int err = 0;
1945 
1946 	mutex_lock(&chain->filter_chain_lock);
1947 
1948 	if (tcf_proto_exists_destroying(chain, tp_new)) {
1949 		mutex_unlock(&chain->filter_chain_lock);
1950 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1951 		return ERR_PTR(-EAGAIN);
1952 	}
1953 
1954 	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
1955 	if (!tp)
1956 		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1957 	mutex_unlock(&chain->filter_chain_lock);
1958 
1959 	if (tp) {
1960 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1961 		tp_new = tp;
1962 	} else if (err) {
1963 		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1964 		tp_new = ERR_PTR(err);
1965 	}
1966 
1967 	return tp_new;
1968 }
1969 
1970 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1971 				      struct tcf_proto *tp, bool rtnl_held,
1972 				      struct netlink_ext_ack *extack)
1973 {
1974 	struct tcf_chain_info chain_info;
1975 	struct tcf_proto *tp_iter;
1976 	struct tcf_proto **pprev;
1977 	struct tcf_proto *next;
1978 
1979 	mutex_lock(&chain->filter_chain_lock);
1980 
1981 	/* Atomically find and remove tp from chain. */
1982 	for (pprev = &chain->filter_chain;
1983 	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1984 	     pprev = &tp_iter->next) {
1985 		if (tp_iter == tp) {
1986 			chain_info.pprev = pprev;
1987 			chain_info.next = tp_iter->next;
1988 			WARN_ON(tp_iter->deleting);
1989 			break;
1990 		}
1991 	}
1992 	/* Verify that tp still exists and no new filters were inserted
1993 	 * concurrently.
1994 	 * Mark tp for deletion if it is empty.
1995 	 */
1996 	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1997 		mutex_unlock(&chain->filter_chain_lock);
1998 		return;
1999 	}
2000 
2001 	tcf_proto_signal_destroying(chain, tp);
2002 	next = tcf_chain_dereference(chain_info.next, chain);
2003 	if (tp == chain->filter_chain)
2004 		tcf_chain0_head_change(chain, next);
2005 	RCU_INIT_POINTER(*chain_info.pprev, next);
2006 	mutex_unlock(&chain->filter_chain_lock);
2007 
2008 	tcf_proto_put(tp, rtnl_held, extack);
2009 }
2010 
2011 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2012 					   struct tcf_chain_info *chain_info,
2013 					   u32 protocol, u32 prio,
2014 					   bool prio_allocate,
2015 					   struct netlink_ext_ack *extack)
2016 {
2017 	struct tcf_proto **pprev;
2018 	struct tcf_proto *tp;
2019 
2020 	/* Check the chain for existence of a proto-tcf with this priority */
2021 	for (pprev = &chain->filter_chain;
2022 	     (tp = tcf_chain_dereference(*pprev, chain));
2023 	     pprev = &tp->next) {
2024 		if (tp->prio >= prio) {
2025 			if (tp->prio == prio) {
2026 				if (prio_allocate) {
2027 					NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
2028 					return ERR_PTR(-ENOSPC);
2029 				}
2030 				if (tp->protocol != protocol && protocol) {
2031 					NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
2032 					return ERR_PTR(-EINVAL);
2033 				}
2034 			} else {
2035 				tp = NULL;
2036 			}
2037 			break;
2038 		}
2039 	}
2040 	chain_info->pprev = pprev;
2041 	if (tp) {
2042 		chain_info->next = tp->next;
2043 		tcf_proto_get(tp);
2044 	} else {
2045 		chain_info->next = NULL;
2046 	}
2047 	return tp;
2048 }
2049 
2050 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2051 			 struct tcf_proto *tp, struct tcf_block *block,
2052 			 struct Qdisc *q, u32 parent, void *fh,
2053 			 u32 portid, u32 seq, u16 flags, int event,
2054 			 bool terse_dump, bool rtnl_held,
2055 			 struct netlink_ext_ack *extack)
2056 {
2057 	struct tcmsg *tcm;
2058 	struct nlmsghdr *nlh;
2059 	unsigned char *b = skb_tail_pointer(skb);
2060 	int ret = -EMSGSIZE;
2061 
2062 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2063 	if (!nlh)
2064 		goto out_nlmsg_trim;
2065 	tcm = nlmsg_data(nlh);
2066 	tcm->tcm_family = AF_UNSPEC;
2067 	tcm->tcm__pad1 = 0;
2068 	tcm->tcm__pad2 = 0;
2069 	if (q) {
2070 		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2071 		tcm->tcm_parent = parent;
2072 	} else {
2073 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2074 		tcm->tcm_block_index = block->index;
2075 	}
2076 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2077 	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2078 		goto nla_put_failure;
2079 	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2080 		goto nla_put_failure;
2081 	if (!fh) {
2082 		tcm->tcm_handle = 0;
2083 	} else if (terse_dump) {
2084 		if (tp->ops->terse_dump) {
2085 			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2086 						rtnl_held) < 0)
2087 				goto nla_put_failure;
2088 		} else {
2089 			goto cls_op_not_supp;
2090 		}
2091 	} else {
2092 		if (tp->ops->dump &&
2093 		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2094 			goto nla_put_failure;
2095 	}
2096 
2097 	if (extack && extack->_msg &&
2098 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2099 		goto nla_put_failure;
2100 
2101 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2102 
2103 	return skb->len;
2104 
2105 cls_op_not_supp:
2106 	ret = -EOPNOTSUPP;
2107 out_nlmsg_trim:
2108 nla_put_failure:
2109 	nlmsg_trim(skb, b);
2110 	return ret;
2111 }
2112 
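/* Build a notification skb for one filter. The buffer starts at
 * NLMSG_GOODSIZE (or the request size, if larger) and, whenever
 * tcf_fill_node() overflows it (-EMSGSIZE), is grown by another
 * NLMSG_GOODSIZE and rebuilt from scratch.
 */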
2113 static struct sk_buff *tfilter_notify_prep(struct net *net,
2114 					   struct sk_buff *oskb,
2115 					   struct nlmsghdr *n,
2116 					   struct tcf_proto *tp,
2117 					   struct tcf_block *block,
2118 					   struct Qdisc *q, u32 parent,
2119 					   void *fh, int event,
2120 					   u32 portid, bool rtnl_held,
2121 					   struct netlink_ext_ack *extack)
2122 {
2123 	unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
2124 	struct sk_buff *skb;
2125 	int ret;
2126 
2127 retry:
2128 	skb = alloc_skb(size, GFP_KERNEL);
2129 	if (!skb)
2130 		return ERR_PTR(-ENOBUFS);
2131 
2132 	ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2133 			    n->nlmsg_seq, n->nlmsg_flags, event, false,
2134 			    rtnl_held, extack);
2135 	if (ret <= 0) {
2136 		kfree_skb(skb);
2137 		if (ret == -EMSGSIZE) {
2138 			size += NLMSG_GOODSIZE;
2139 			goto retry;
2140 		}
2141 		return ERR_PTR(-EINVAL);
2142 	}
2143 	return skb;
2144 }
2145 
2146 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2147 			  struct nlmsghdr *n, struct tcf_proto *tp,
2148 			  struct tcf_block *block, struct Qdisc *q,
2149 			  u32 parent, void *fh, int event, bool unicast,
2150 			  bool rtnl_held, struct netlink_ext_ack *extack)
2151 {
2152 	struct sk_buff *skb;
2153 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2154 	int err = 0;
2155 
2156 	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2157 		return 0;
2158 
2159 	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
2160 				  portid, rtnl_held, extack);
2161 	if (IS_ERR(skb))
2162 		return PTR_ERR(skb);
2163 
2164 	if (unicast)
2165 		err = rtnl_unicast(skb, net, portid);
2166 	else
2167 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2168 				     n->nlmsg_flags & NLM_F_ECHO);
2169 	return err;
2170 }
2171 
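/* The delete notification has to be built before ->delete() runs, since
 * fh (and the filter it identifies) is gone afterwards; the prepared skb
 * is only sent once the delete has actually succeeded.
 */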
2172 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2173 			      struct nlmsghdr *n, struct tcf_proto *tp,
2174 			      struct tcf_block *block, struct Qdisc *q,
2175 			      u32 parent, void *fh, bool *last, bool rtnl_held,
2176 			      struct netlink_ext_ack *extack)
2177 {
2178 	struct sk_buff *skb;
2179 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2180 	int err;
2181 
2182 	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2183 		return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2184 
2185 	skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
2186 				  RTM_DELTFILTER, portid, rtnl_held, extack);
2187 	if (IS_ERR(skb)) {
2188 		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2189 		return PTR_ERR(skb);
2190 	}
2191 
2192 	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2193 	if (err) {
2194 		kfree_skb(skb);
2195 		return err;
2196 	}
2197 
2198 	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2199 			     n->nlmsg_flags & NLM_F_ECHO);
2200 	if (err < 0)
2201 		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2202 
2203 	return err;
2204 }
2205 
2206 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2207 				 struct tcf_block *block, struct Qdisc *q,
2208 				 u32 parent, struct nlmsghdr *n,
2209 				 struct tcf_chain *chain, int event,
2210 				 struct netlink_ext_ack *extack)
2211 {
2212 	struct tcf_proto *tp;
2213 
2214 	for (tp = tcf_get_next_proto(chain, NULL);
2215 	     tp; tp = tcf_get_next_proto(chain, tp))
2216 		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2217 			       event, false, true, extack);
2218 }
2219 
2220 static void tfilter_put(struct tcf_proto *tp, void *fh)
2221 {
2222 	if (tp->ops->put && fh)
2223 		tp->ops->put(tp, fh);
2224 }
2225 
2226 static bool is_qdisc_ingress(__u32 classid)
2227 {
2228 	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2229 }
2230 
2231 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2232 			  struct netlink_ext_ack *extack)
2233 {
2234 	struct net *net = sock_net(skb->sk);
2235 	struct nlattr *tca[TCA_MAX + 1];
2236 	char name[IFNAMSIZ];
2237 	struct tcmsg *t;
2238 	u32 protocol;
2239 	u32 prio;
2240 	bool prio_allocate;
2241 	u32 parent;
2242 	u32 chain_index;
2243 	struct Qdisc *q;
2244 	struct tcf_chain_info chain_info;
2245 	struct tcf_chain *chain;
2246 	struct tcf_block *block;
2247 	struct tcf_proto *tp;
2248 	unsigned long cl;
2249 	void *fh;
2250 	int err;
2251 	int tp_created;
2252 	bool rtnl_held = false;
2253 	u32 flags;
2254 
2255 replay:
2256 	tp_created = 0;
2257 
2258 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2259 				     rtm_tca_policy, extack);
2260 	if (err < 0)
2261 		return err;
2262 
2263 	t = nlmsg_data(n);
2264 	protocol = TC_H_MIN(t->tcm_info);
2265 	prio = TC_H_MAJ(t->tcm_info);
2266 	prio_allocate = false;
2267 	parent = t->tcm_parent;
2268 	tp = NULL;
2269 	cl = 0;
2270 	block = NULL;
2271 	q = NULL;
2272 	chain = NULL;
2273 	flags = 0;
2274 
2275 	if (prio == 0) {
2276 		/* If no priority is provided by the user,
2277 		 * we allocate one.
2278 		 */
2279 		if (n->nlmsg_flags & NLM_F_CREATE) {
2280 			prio = TC_H_MAKE(0x80000000U, 0U);
2281 			prio_allocate = true;
2282 		} else {
2283 			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2284 			return -ENOENT;
2285 		}
2286 	}
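	/* The provisional prio above only marks the top of the auto-alloc
	 * range; the final value is picked by tcf_auto_prio() under
	 * filter_chain_lock once the insertion point is known.
	 */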
2287 
2288 	/* Find head of filter chain. */
2289 
2290 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2291 	if (err)
2292 		return err;
2293 
2294 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2295 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2296 		err = -EINVAL;
2297 		goto errout;
2298 	}
2299 
2300 	/* Take the rtnl mutex if rtnl_held was set on a previous iteration,
2301 	 * if the block is shared (no qdisc found), if the qdisc or the
2302 	 * classifier is not unlocked, or if the classifier type is not
2303 	 * specified. */
2304 	if (rtnl_held ||
2305 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2306 	    !tcf_proto_is_unlocked(name)) {
2307 		rtnl_held = true;
2308 		rtnl_lock();
2309 	}
2310 
2311 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2312 	if (err)
2313 		goto errout;
2314 
2315 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2316 				 extack);
2317 	if (IS_ERR(block)) {
2318 		err = PTR_ERR(block);
2319 		goto errout;
2320 	}
2321 	block->classid = parent;
2322 
2323 	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2324 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2325 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2326 		err = -EINVAL;
2327 		goto errout;
2328 	}
2329 	chain = tcf_chain_get(block, chain_index, true);
2330 	if (!chain) {
2331 		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2332 		err = -ENOMEM;
2333 		goto errout;
2334 	}
2335 
2336 	mutex_lock(&chain->filter_chain_lock);
2337 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2338 			       prio, prio_allocate, extack);
2339 	if (IS_ERR(tp)) {
2340 		err = PTR_ERR(tp);
2341 		goto errout_locked;
2342 	}
2343 
2344 	if (tp == NULL) {
2345 		struct tcf_proto *tp_new = NULL;
2346 
2347 		if (chain->flushing) {
2348 			err = -EAGAIN;
2349 			goto errout_locked;
2350 		}
2351 
2352 		/* Proto-tcf does not exist, create a new one */
2353 
2354 		if (tca[TCA_KIND] == NULL || !protocol) {
2355 			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2356 			err = -EINVAL;
2357 			goto errout_locked;
2358 		}
2359 
2360 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2361 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2362 			err = -ENOENT;
2363 			goto errout_locked;
2364 		}
2365 
2366 		if (prio_allocate)
2367 			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2368 							       &chain_info));
2369 
2370 		mutex_unlock(&chain->filter_chain_lock);
2371 		tp_new = tcf_proto_create(name, protocol, prio, chain,
2372 					  rtnl_held, extack);
2373 		if (IS_ERR(tp_new)) {
2374 			err = PTR_ERR(tp_new);
2375 			goto errout_tp;
2376 		}
2377 
2378 		tp_created = 1;
2379 		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2380 						rtnl_held);
2381 		if (IS_ERR(tp)) {
2382 			err = PTR_ERR(tp);
2383 			goto errout_tp;
2384 		}
2385 	} else {
2386 		mutex_unlock(&chain->filter_chain_lock);
2387 	}
2388 
2389 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2390 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2391 		err = -EINVAL;
2392 		goto errout;
2393 	}
2394 
2395 	fh = tp->ops->get(tp, t->tcm_handle);
2396 
2397 	if (!fh) {
2398 		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2399 			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2400 			err = -ENOENT;
2401 			goto errout;
2402 		}
2403 	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2404 		tfilter_put(tp, fh);
2405 		NL_SET_ERR_MSG(extack, "Filter already exists");
2406 		err = -EEXIST;
2407 		goto errout;
2408 	}
2409 
2410 	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2411 		tfilter_put(tp, fh);
2412 		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2413 		err = -EINVAL;
2414 		goto errout;
2415 	}
2416 
2417 	if (!(n->nlmsg_flags & NLM_F_CREATE))
2418 		flags |= TCA_ACT_FLAGS_REPLACE;
2419 	if (!rtnl_held)
2420 		flags |= TCA_ACT_FLAGS_NO_RTNL;
2421 	if (is_qdisc_ingress(parent))
2422 		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2423 	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2424 			      flags, extack);
2425 	if (err == 0) {
2426 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2427 			       RTM_NEWTFILTER, false, rtnl_held, extack);
2428 		tfilter_put(tp, fh);
2429 		tcf_proto_count_usesw(tp, true);
2430 		/* q pointer is NULL for shared blocks */
2431 		if (q)
2432 			q->flags &= ~TCQ_F_CAN_BYPASS;
2433 	}
2434 
2435 errout:
2436 	if (err && tp_created)
2437 		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2438 errout_tp:
2439 	if (chain) {
2440 		if (tp && !IS_ERR(tp))
2441 			tcf_proto_put(tp, rtnl_held, NULL);
2442 		if (!tp_created)
2443 			tcf_chain_put(chain);
2444 	}
2445 	tcf_block_release(q, block, rtnl_held);
2446 
2447 	if (rtnl_held)
2448 		rtnl_unlock();
2449 
2450 	if (err == -EAGAIN) {
2451 		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2452 		 * of target chain.
2453 		 */
2454 		rtnl_held = true;
2455 		/* Replay the request. */
2456 		goto replay;
2457 	}
2458 	return err;
2459 
2460 errout_locked:
2461 	mutex_unlock(&chain->filter_chain_lock);
2462 	goto errout;
2463 }
2464 
2465 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2466 			  struct netlink_ext_ack *extack)
2467 {
2468 	struct net *net = sock_net(skb->sk);
2469 	struct nlattr *tca[TCA_MAX + 1];
2470 	char name[IFNAMSIZ];
2471 	struct tcmsg *t;
2472 	u32 protocol;
2473 	u32 prio;
2474 	u32 parent;
2475 	u32 chain_index;
2476 	struct Qdisc *q = NULL;
2477 	struct tcf_chain_info chain_info;
2478 	struct tcf_chain *chain = NULL;
2479 	struct tcf_block *block = NULL;
2480 	struct tcf_proto *tp = NULL;
2481 	unsigned long cl = 0;
2482 	void *fh = NULL;
2483 	int err;
2484 	bool rtnl_held = false;
2485 
2486 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2487 				     rtm_tca_policy, extack);
2488 	if (err < 0)
2489 		return err;
2490 
2491 	t = nlmsg_data(n);
2492 	protocol = TC_H_MIN(t->tcm_info);
2493 	prio = TC_H_MAJ(t->tcm_info);
2494 	parent = t->tcm_parent;
2495 
2496 	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2497 		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2498 		return -ENOENT;
2499 	}
2500 
2501 	/* Find head of filter chain. */
2502 
2503 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2504 	if (err)
2505 		return err;
2506 
2507 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2508 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2509 		err = -EINVAL;
2510 		goto errout;
2511 	}
2512 	/* Take the rtnl mutex if flushing the whole chain, if the block is
2513 	 * shared (no qdisc found), if the qdisc or the classifier is not
2514 	 * unlocked, or if the classifier type is not specified.
2515 	 */
2516 	if (!prio ||
2517 	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2518 	    !tcf_proto_is_unlocked(name)) {
2519 		rtnl_held = true;
2520 		rtnl_lock();
2521 	}
2522 
2523 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2524 	if (err)
2525 		goto errout;
2526 
2527 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2528 				 extack);
2529 	if (IS_ERR(block)) {
2530 		err = PTR_ERR(block);
2531 		goto errout;
2532 	}
2533 
2534 	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2535 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2536 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2537 		err = -EINVAL;
2538 		goto errout;
2539 	}
2540 	chain = tcf_chain_get(block, chain_index, false);
2541 	if (!chain) {
2542 		/* User requested flush on non-existent chain. Nothing to do,
2543 		 * so just return success.
2544 		 */
2545 		if (prio == 0) {
2546 			err = 0;
2547 			goto errout;
2548 		}
2549 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2550 		err = -ENOENT;
2551 		goto errout;
2552 	}
2553 
2554 	if (prio == 0) {
2555 		tfilter_notify_chain(net, skb, block, q, parent, n,
2556 				     chain, RTM_DELTFILTER, extack);
2557 		tcf_chain_flush(chain, rtnl_held);
2558 		err = 0;
2559 		goto errout;
2560 	}
2561 
2562 	mutex_lock(&chain->filter_chain_lock);
2563 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2564 			       prio, false, extack);
2565 	if (!tp) {
2566 		err = -ENOENT;
2567 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2568 		goto errout_locked;
2569 	} else if (IS_ERR(tp)) {
2570 		err = PTR_ERR(tp);
2571 		goto errout_locked;
2572 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2573 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2574 		err = -EINVAL;
2575 		goto errout_locked;
2576 	} else if (t->tcm_handle == 0) {
2577 		tcf_proto_signal_destroying(chain, tp);
2578 		tcf_chain_tp_remove(chain, &chain_info, tp);
2579 		mutex_unlock(&chain->filter_chain_lock);
2580 
2581 		tcf_proto_put(tp, rtnl_held, NULL);
2582 		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2583 			       RTM_DELTFILTER, false, rtnl_held, extack);
2584 		err = 0;
2585 		goto errout;
2586 	}
2587 	mutex_unlock(&chain->filter_chain_lock);
2588 
2589 	fh = tp->ops->get(tp, t->tcm_handle);
2590 
2591 	if (!fh) {
2592 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2593 		err = -ENOENT;
2594 	} else {
2595 		bool last;
2596 
2597 		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2598 					 &last, rtnl_held, extack);
2599 
2600 		if (err)
2601 			goto errout;
2602 		if (last)
2603 			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2604 	}
2605 
2606 errout:
2607 	if (chain) {
2608 		if (tp && !IS_ERR(tp))
2609 			tcf_proto_put(tp, rtnl_held, NULL);
2610 		tcf_chain_put(chain);
2611 	}
2612 	tcf_block_release(q, block, rtnl_held);
2613 
2614 	if (rtnl_held)
2615 		rtnl_unlock();
2616 
2617 	return err;
2618 
2619 errout_locked:
2620 	mutex_unlock(&chain->filter_chain_lock);
2621 	goto errout;
2622 }
2623 
2624 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2625 			  struct netlink_ext_ack *extack)
2626 {
2627 	struct net *net = sock_net(skb->sk);
2628 	struct nlattr *tca[TCA_MAX + 1];
2629 	char name[IFNAMSIZ];
2630 	struct tcmsg *t;
2631 	u32 protocol;
2632 	u32 prio;
2633 	u32 parent;
2634 	u32 chain_index;
2635 	struct Qdisc *q = NULL;
2636 	struct tcf_chain_info chain_info;
2637 	struct tcf_chain *chain = NULL;
2638 	struct tcf_block *block = NULL;
2639 	struct tcf_proto *tp = NULL;
2640 	unsigned long cl = 0;
2641 	void *fh = NULL;
2642 	int err;
2643 	bool rtnl_held = false;
2644 
2645 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2646 				     rtm_tca_policy, extack);
2647 	if (err < 0)
2648 		return err;
2649 
2650 	t = nlmsg_data(n);
2651 	protocol = TC_H_MIN(t->tcm_info);
2652 	prio = TC_H_MAJ(t->tcm_info);
2653 	parent = t->tcm_parent;
2654 
2655 	if (prio == 0) {
2656 		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2657 		return -ENOENT;
2658 	}
2659 
2660 	/* Find head of filter chain. */
2661 
2662 	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2663 	if (err)
2664 		return err;
2665 
2666 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2667 		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2668 		err = -EINVAL;
2669 		goto errout;
2670 	}
2671 	/* Take the rtnl mutex if the block is shared (no qdisc found), if
2672 	 * the qdisc or the classifier is not unlocked, or if the classifier
2673 	 * type is not specified.
2674 	 */
2675 	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2676 	    !tcf_proto_is_unlocked(name)) {
2677 		rtnl_held = true;
2678 		rtnl_lock();
2679 	}
2680 
2681 	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2682 	if (err)
2683 		goto errout;
2684 
2685 	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2686 				 extack);
2687 	if (IS_ERR(block)) {
2688 		err = PTR_ERR(block);
2689 		goto errout;
2690 	}
2691 
2692 	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2693 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2694 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2695 		err = -EINVAL;
2696 		goto errout;
2697 	}
2698 	chain = tcf_chain_get(block, chain_index, false);
2699 	if (!chain) {
2700 		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2701 		err = -EINVAL;
2702 		goto errout;
2703 	}
2704 
2705 	mutex_lock(&chain->filter_chain_lock);
2706 	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2707 			       prio, false, extack);
2708 	mutex_unlock(&chain->filter_chain_lock);
2709 	if (!tp) {
2710 		err = -ENOENT;
2711 		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2712 		goto errout;
2713 	} else if (IS_ERR(tp)) {
2714 		err = PTR_ERR(tp);
2715 		goto errout;
2716 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2717 		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2718 		err = -EINVAL;
2719 		goto errout;
2720 	}
2721 
2722 	fh = tp->ops->get(tp, t->tcm_handle);
2723 
2724 	if (!fh) {
2725 		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2726 		err = -ENOENT;
2727 	} else {
2728 		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2729 				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2730 		if (err < 0)
2731 			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2732 	}
2733 
2734 	tfilter_put(tp, fh);
2735 errout:
2736 	if (chain) {
2737 		if (tp && !IS_ERR(tp))
2738 			tcf_proto_put(tp, rtnl_held, NULL);
2739 		tcf_chain_put(chain);
2740 	}
2741 	tcf_block_release(q, block, rtnl_held);
2742 
2743 	if (rtnl_held)
2744 		rtnl_unlock();
2745 
2746 	return err;
2747 }
2748 
2749 struct tcf_dump_args {
2750 	struct tcf_walker w;
2751 	struct sk_buff *skb;
2752 	struct netlink_callback *cb;
2753 	struct tcf_block *block;
2754 	struct Qdisc *q;
2755 	u32 parent;
2756 	bool terse_dump;
2757 };
2758 
2759 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2760 {
2761 	struct tcf_dump_args *a = (void *)arg;
2762 	struct net *net = sock_net(a->skb->sk);
2763 
2764 	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2765 			     n, NETLINK_CB(a->cb->skb).portid,
2766 			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2767 			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2768 }
2769 
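/* Walk every proto on the chain, dumping each. __tcf_get_next_proto()
 * hands back the next proto with a reference held, and the previous one
 * is only put after the next has been grabbed, so the walk stays safe
 * against concurrent deletion without pinning the chain lock for the
 * whole dump.
 */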
2770 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2771 			   struct sk_buff *skb, struct netlink_callback *cb,
2772 			   long index_start, long *p_index, bool terse)
2773 {
2774 	struct net *net = sock_net(skb->sk);
2775 	struct tcf_block *block = chain->block;
2776 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2777 	struct tcf_proto *tp, *tp_prev;
2778 	struct tcf_dump_args arg;
2779 
2780 	for (tp = __tcf_get_next_proto(chain, NULL);
2781 	     tp;
2782 	     tp_prev = tp,
2783 		     tp = __tcf_get_next_proto(chain, tp),
2784 		     tcf_proto_put(tp_prev, true, NULL),
2785 		     (*p_index)++) {
2786 		if (*p_index < index_start)
2787 			continue;
2788 		if (TC_H_MAJ(tcm->tcm_info) &&
2789 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2790 			continue;
2791 		if (TC_H_MIN(tcm->tcm_info) &&
2792 		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2793 			continue;
2794 		if (*p_index > index_start)
2795 			memset(&cb->args[1], 0,
2796 			       sizeof(cb->args) - sizeof(cb->args[0]));
2797 		if (cb->args[1] == 0) {
2798 			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2799 					  NETLINK_CB(cb->skb).portid,
2800 					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2801 					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2802 				goto errout;
2803 			cb->args[1] = 1;
2804 		}
2805 		if (!tp->ops->walk)
2806 			continue;
2807 		arg.w.fn = tcf_node_dump;
2808 		arg.skb = skb;
2809 		arg.cb = cb;
2810 		arg.block = block;
2811 		arg.q = q;
2812 		arg.parent = parent;
2813 		arg.w.stop = 0;
2814 		arg.w.skip = cb->args[1] - 1;
2815 		arg.w.count = 0;
2816 		arg.w.cookie = cb->args[2];
2817 		arg.terse_dump = terse;
2818 		tp->ops->walk(tp, &arg.w, true);
2819 		cb->args[2] = arg.w.cookie;
2820 		cb->args[1] = arg.w.count + 1;
2821 		if (arg.w.stop)
2822 			goto errout;
2823 	}
2824 	return true;
2825 
2826 errout:
2827 	tcf_proto_put(tp, true, NULL);
2828 	return false;
2829 }
2830 
2831 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2832 	[TCA_CHAIN]      = { .type = NLA_U32 },
2833 	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2834 };
2835 
2836 /* called with RTNL */
2837 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2838 {
2839 	struct tcf_chain *chain, *chain_prev;
2840 	struct net *net = sock_net(skb->sk);
2841 	struct nlattr *tca[TCA_MAX + 1];
2842 	struct Qdisc *q = NULL;
2843 	struct tcf_block *block;
2844 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2845 	bool terse_dump = false;
2846 	long index_start;
2847 	long index;
2848 	u32 parent;
2849 	int err;
2850 
2851 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2852 		return skb->len;
2853 
2854 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2855 				     tcf_tfilter_dump_policy, cb->extack);
2856 	if (err)
2857 		return err;
2858 
2859 	if (tca[TCA_DUMP_FLAGS]) {
2860 		struct nla_bitfield32 flags =
2861 			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2862 
2863 		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2864 	}
2865 
2866 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2867 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2868 		if (!block)
2869 			goto out;
2870 		/* If we work with block index, q is NULL and parent value
2871 		 * will never be used in the following code. The check
2872 		 * in tcf_fill_node prevents it. However, the compiler cannot
2873 		 * see that far, so set parent to zero to silence the warning
2874 		 * about parent being used uninitialized.
2875 		 */
2876 		parent = 0;
2877 	} else {
2878 		const struct Qdisc_class_ops *cops;
2879 		struct net_device *dev;
2880 		unsigned long cl = 0;
2881 
2882 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2883 		if (!dev)
2884 			return skb->len;
2885 
2886 		parent = tcm->tcm_parent;
2887 		if (!parent)
2888 			q = rtnl_dereference(dev->qdisc);
2889 		else
2890 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2891 		if (!q)
2892 			goto out;
2893 		cops = q->ops->cl_ops;
2894 		if (!cops)
2895 			goto out;
2896 		if (!cops->tcf_block)
2897 			goto out;
2898 		if (TC_H_MIN(tcm->tcm_parent)) {
2899 			cl = cops->find(q, tcm->tcm_parent);
2900 			if (cl == 0)
2901 				goto out;
2902 		}
2903 		block = cops->tcf_block(q, cl, NULL);
2904 		if (!block)
2905 			goto out;
2906 		parent = block->classid;
2907 		if (tcf_block_shared(block))
2908 			q = NULL;
2909 	}
2910 
2911 	index_start = cb->args[0];
2912 	index = 0;
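	/* cb->args[0] is the flat filter index to resume from across dump
	 * calls; args[1] and args[2] are per-proto fill progress and the
	 * classifier walker cookie, both managed by tcf_chain_dump().
	 */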
2913 
2914 	for (chain = __tcf_get_next_chain(block, NULL);
2915 	     chain;
2916 	     chain_prev = chain,
2917 		     chain = __tcf_get_next_chain(block, chain),
2918 		     tcf_chain_put(chain_prev)) {
2919 		if (tca[TCA_CHAIN] &&
2920 		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2921 			continue;
2922 		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2923 				    index_start, &index, terse_dump)) {
2924 			tcf_chain_put(chain);
2925 			err = -EMSGSIZE;
2926 			break;
2927 		}
2928 	}
2929 
2930 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2931 		tcf_block_refcnt_put(block, true);
2932 	cb->args[0] = index;
2933 
2934 out:
2935 	/* If we made no progress, the error (EMSGSIZE) is real */
2936 	if (skb->len == 0 && err)
2937 		return err;
2938 	return skb->len;
2939 }
2940 
2941 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2942 			      void *tmplt_priv, u32 chain_index,
2943 			      struct net *net, struct sk_buff *skb,
2944 			      struct tcf_block *block,
2945 			      u32 portid, u32 seq, u16 flags, int event,
2946 			      struct netlink_ext_ack *extack)
2947 {
2948 	unsigned char *b = skb_tail_pointer(skb);
2949 	const struct tcf_proto_ops *ops;
2950 	struct nlmsghdr *nlh;
2951 	struct tcmsg *tcm;
2952 	void *priv;
2953 
2954 	ops = tmplt_ops;
2955 	priv = tmplt_priv;
2956 
2957 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2958 	if (!nlh)
2959 		goto out_nlmsg_trim;
2960 	tcm = nlmsg_data(nlh);
2961 	tcm->tcm_family = AF_UNSPEC;
2962 	tcm->tcm__pad1 = 0;
2963 	tcm->tcm__pad2 = 0;
2964 	tcm->tcm_handle = 0;
2965 	if (block->q) {
2966 		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2967 		tcm->tcm_parent = block->q->handle;
2968 	} else {
2969 		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2970 		tcm->tcm_block_index = block->index;
2971 	}
2972 
2973 	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2974 		goto nla_put_failure;
2975 
2976 	if (ops) {
2977 		if (nla_put_string(skb, TCA_KIND, ops->kind))
2978 			goto nla_put_failure;
2979 		if (ops->tmplt_dump(skb, net, priv) < 0)
2980 			goto nla_put_failure;
2981 	}
2982 
2983 	if (extack && extack->_msg &&
2984 	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2985 		goto out_nlmsg_trim;
2986 
2987 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2988 
2989 	return skb->len;
2990 
2991 out_nlmsg_trim:
2992 nla_put_failure:
2993 	nlmsg_trim(skb, b);
2994 	return -EMSGSIZE;
2995 }
2996 
2997 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2998 			   u32 seq, u16 flags, int event, bool unicast,
2999 			   struct netlink_ext_ack *extack)
3000 {
3001 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3002 	struct tcf_block *block = chain->block;
3003 	struct net *net = block->net;
3004 	struct sk_buff *skb;
3005 	int err = 0;
3006 
3007 	if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
3008 		return 0;
3009 
3010 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3011 	if (!skb)
3012 		return -ENOBUFS;
3013 
3014 	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3015 			       chain->index, net, skb, block, portid,
3016 			       seq, flags, event, extack) <= 0) {
3017 		kfree_skb(skb);
3018 		return -EINVAL;
3019 	}
3020 
3021 	if (unicast)
3022 		err = rtnl_unicast(skb, net, portid);
3023 	else
3024 		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
3025 				     flags & NLM_F_ECHO);
3026 
3027 	return err;
3028 }
3029 
3030 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3031 				  void *tmplt_priv, u32 chain_index,
3032 				  struct tcf_block *block, struct sk_buff *oskb,
3033 				  u32 seq, u16 flags)
3034 {
3035 	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3036 	struct net *net = block->net;
3037 	struct sk_buff *skb;
3038 
3039 	if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3040 		return 0;
3041 
3042 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3043 	if (!skb)
3044 		return -ENOBUFS;
3045 
3046 	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3047 			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3048 		kfree_skb(skb);
3049 		return -EINVAL;
3050 	}
3051 
3052 	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3053 }
3054 
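/* Chain templates pin the classifier kind (and, for flower, the mask
 * layout) for every filter later added to the chain; only classifiers
 * implementing the full tmplt_* ops can back one. Roughly, from
 * userspace (an illustrative invocation, not taken from this file):
 *
 *	tc chain add dev eth0 ingress chain 1 \
 *		protocol ip flower dst_ip 192.0.2.0/24
 */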
3055 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3056 			      struct nlattr **tca,
3057 			      struct netlink_ext_ack *extack)
3058 {
3059 	const struct tcf_proto_ops *ops;
3060 	char name[IFNAMSIZ];
3061 	void *tmplt_priv;
3062 
3063 	/* If kind is not set, the user did not specify a template. */
3064 	if (!tca[TCA_KIND])
3065 		return 0;
3066 
3067 	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3068 		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3069 		return -EINVAL;
3070 	}
3071 
3072 	ops = tcf_proto_lookup_ops(name, true, extack);
3073 	if (IS_ERR(ops))
3074 		return PTR_ERR(ops);
3075 	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3076 	    !ops->tmplt_reoffload) {
3077 		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3078 		module_put(ops->owner);
3079 		return -EOPNOTSUPP;
3080 	}
3081 
3082 	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3083 	if (IS_ERR(tmplt_priv)) {
3084 		module_put(ops->owner);
3085 		return PTR_ERR(tmplt_priv);
3086 	}
3087 	chain->tmplt_ops = ops;
3088 	chain->tmplt_priv = tmplt_priv;
3089 	return 0;
3090 }
3091 
3092 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3093 			       void *tmplt_priv)
3094 {
3095 	/* If no template ops are set, there is no work to do for us. */
3096 	if (!tmplt_ops)
3097 		return;
3098 
3099 	tmplt_ops->tmplt_destroy(tmplt_priv);
3100 	module_put(tmplt_ops->owner);
3101 }
3102 
3103 /* Add/delete/get a chain */
3104 
3105 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3106 			struct netlink_ext_ack *extack)
3107 {
3108 	struct net *net = sock_net(skb->sk);
3109 	struct nlattr *tca[TCA_MAX + 1];
3110 	struct tcmsg *t;
3111 	u32 parent;
3112 	u32 chain_index;
3113 	struct Qdisc *q;
3114 	struct tcf_chain *chain;
3115 	struct tcf_block *block;
3116 	unsigned long cl;
3117 	int err;
3118 
3119 replay:
3120 	q = NULL;
3121 	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3122 				     rtm_tca_policy, extack);
3123 	if (err < 0)
3124 		return err;
3125 
3126 	t = nlmsg_data(n);
3127 	parent = t->tcm_parent;
3128 	cl = 0;
3129 
3130 	block = tcf_block_find(net, &q, &parent, &cl,
3131 			       t->tcm_ifindex, t->tcm_block_index, extack);
3132 	if (IS_ERR(block))
3133 		return PTR_ERR(block);
3134 
3135 	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
3136 	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3137 		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3138 		err = -EINVAL;
3139 		goto errout_block;
3140 	}
3141 
3142 	mutex_lock(&block->lock);
3143 	chain = tcf_chain_lookup(block, chain_index);
3144 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3145 		if (chain) {
3146 			if (tcf_chain_held_by_acts_only(chain)) {
3147 				/* The chain exists only because there is
3148 				 * some action referencing it.
3149 				 */
3150 				tcf_chain_hold(chain);
3151 			} else {
3152 				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3153 				err = -EEXIST;
3154 				goto errout_block_locked;
3155 			}
3156 		} else {
3157 			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3158 				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3159 				err = -ENOENT;
3160 				goto errout_block_locked;
3161 			}
3162 			chain = tcf_chain_create(block, chain_index);
3163 			if (!chain) {
3164 				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3165 				err = -ENOMEM;
3166 				goto errout_block_locked;
3167 			}
3168 		}
3169 	} else {
3170 		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3171 			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3172 			err = -EINVAL;
3173 			goto errout_block_locked;
3174 		}
3175 		tcf_chain_hold(chain);
3176 	}
3177 
3178 	if (n->nlmsg_type == RTM_NEWCHAIN) {
3179 		/* Modifying chain requires holding parent block lock. In case
3180 		 * the chain was successfully added, take a reference to the
3181 		 * chain. This ensures that an empty chain does not disappear at
3182 		 * the end of this function.
3183 		 */
3184 		tcf_chain_hold(chain);
3185 		chain->explicitly_created = true;
3186 	}
3187 	mutex_unlock(&block->lock);
3188 
3189 	switch (n->nlmsg_type) {
3190 	case RTM_NEWCHAIN:
3191 		err = tc_chain_tmplt_add(chain, net, tca, extack);
3192 		if (err) {
3193 			tcf_chain_put_explicitly_created(chain);
3194 			goto errout;
3195 		}
3196 
3197 		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3198 				RTM_NEWCHAIN, false, extack);
3199 		break;
3200 	case RTM_DELCHAIN:
3201 		tfilter_notify_chain(net, skb, block, q, parent, n,
3202 				     chain, RTM_DELTFILTER, extack);
3203 		/* Flush the chain first as the user requested chain removal. */
3204 		tcf_chain_flush(chain, true);
3205 		/* In case the chain was successfully deleted, put a reference
3206 		 * to the chain previously taken during addition.
3207 		 */
3208 		tcf_chain_put_explicitly_created(chain);
3209 		break;
3210 	case RTM_GETCHAIN:
3211 		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3212 				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3213 		if (err < 0)
3214 			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3215 		break;
3216 	default:
3217 		err = -EOPNOTSUPP;
3218 		NL_SET_ERR_MSG(extack, "Unsupported message type");
3219 		goto errout;
3220 	}
3221 
3222 errout:
3223 	tcf_chain_put(chain);
3224 errout_block:
3225 	tcf_block_release(q, block, true);
3226 	if (err == -EAGAIN)
3227 		/* Replay the request. */
3228 		goto replay;
3229 	return err;
3230 
3231 errout_block_locked:
3232 	mutex_unlock(&block->lock);
3233 	goto errout_block;
3234 }
3235 
3236 /* called with RTNL */
3237 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3238 {
3239 	struct net *net = sock_net(skb->sk);
3240 	struct nlattr *tca[TCA_MAX + 1];
3241 	struct Qdisc *q = NULL;
3242 	struct tcf_block *block;
3243 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3244 	struct tcf_chain *chain;
3245 	long index_start;
3246 	long index;
3247 	int err;
3248 
3249 	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3250 		return skb->len;
3251 
3252 	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3253 				     rtm_tca_policy, cb->extack);
3254 	if (err)
3255 		return err;
3256 
3257 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3258 		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3259 		if (!block)
3260 			goto out;
3261 	} else {
3262 		const struct Qdisc_class_ops *cops;
3263 		struct net_device *dev;
3264 		unsigned long cl = 0;
3265 
3266 		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3267 		if (!dev)
3268 			return skb->len;
3269 
3270 		if (!tcm->tcm_parent)
3271 			q = rtnl_dereference(dev->qdisc);
3272 		else
3273 			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3274 
3275 		if (!q)
3276 			goto out;
3277 		cops = q->ops->cl_ops;
3278 		if (!cops)
3279 			goto out;
3280 		if (!cops->tcf_block)
3281 			goto out;
3282 		if (TC_H_MIN(tcm->tcm_parent)) {
3283 			cl = cops->find(q, tcm->tcm_parent);
3284 			if (cl == 0)
3285 				goto out;
3286 		}
3287 		block = cops->tcf_block(q, cl, NULL);
3288 		if (!block)
3289 			goto out;
3290 		if (tcf_block_shared(block))
3291 			q = NULL;
3292 	}
3293 
3294 	index_start = cb->args[0];
3295 	index = 0;
3296 
3297 	mutex_lock(&block->lock);
3298 	list_for_each_entry(chain, &block->chain_list, list) {
3299 		if ((tca[TCA_CHAIN] &&
3300 		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3301 			continue;
3302 		if (index < index_start) {
3303 			index++;
3304 			continue;
3305 		}
3306 		if (tcf_chain_held_by_acts_only(chain))
3307 			continue;
3308 		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3309 					 chain->index, net, skb, block,
3310 					 NETLINK_CB(cb->skb).portid,
3311 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3312 					 RTM_NEWCHAIN, NULL);
3313 		if (err <= 0)
3314 			break;
3315 		index++;
3316 	}
3317 	mutex_unlock(&block->lock);
3318 
3319 	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3320 		tcf_block_refcnt_put(block, true);
3321 	cb->args[0] = index;
3322 
3323 out:
3324 	/* If we made no progress, the error (EMSGSIZE) is real */
3325 	if (skb->len == 0 && err)
3326 		return err;
3327 	return skb->len;
3328 }
3329 
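/* Initialize a tcf_exts. action and police are the classifier's own
 * netlink attribute ids (e.g. TCA_U32_ACT / TCA_U32_POLICE for cls_u32)
 * that tcf_exts_validate_ex() later looks up in tb[]; an id of zero
 * disables that mode.
 */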
3330 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3331 		     int police, struct tcf_proto *tp, u32 handle,
3332 		     bool use_action_miss)
3333 {
3334 	int err = 0;
3335 
3336 #ifdef CONFIG_NET_CLS_ACT
3337 	exts->type = 0;
3338 	exts->nr_actions = 0;
3339 	exts->miss_cookie_node = NULL;
3340 	/* Note: we do not yet own a reference on net.
3341 	 * One might be taken later via tcf_exts_get_net().
3342 	 */
3343 	exts->net = net;
3344 	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3345 				GFP_KERNEL);
3346 	if (!exts->actions)
3347 		return -ENOMEM;
3348 #endif
3349 
3350 	exts->action = action;
3351 	exts->police = police;
3352 
3353 	if (!use_action_miss)
3354 		return 0;
3355 
3356 	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3357 	if (err)
3358 		goto err_miss_alloc;
3359 
3360 	return 0;
3361 
3362 err_miss_alloc:
3363 	tcf_exts_destroy(exts);
3364 #ifdef CONFIG_NET_CLS_ACT
3365 	exts->actions = NULL;
3366 #endif
3367 	return err;
3368 }
3369 EXPORT_SYMBOL(tcf_exts_init_ex);
3370 
3371 void tcf_exts_destroy(struct tcf_exts *exts)
3372 {
3373 	tcf_exts_miss_cookie_base_destroy(exts);
3374 
3375 #ifdef CONFIG_NET_CLS_ACT
3376 	if (exts->actions) {
3377 		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3378 		kfree(exts->actions);
3379 	}
3380 	exts->nr_actions = 0;
3381 #endif
3382 }
3383 EXPORT_SYMBOL(tcf_exts_destroy);
3384 
3385 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3386 			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3387 			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3388 {
3389 #ifdef CONFIG_NET_CLS_ACT
3390 	{
3391 		int init_res[TCA_ACT_MAX_PRIO] = {};
3392 		struct tc_action *act;
3393 		size_t attr_size = 0;
3394 
3395 		if (exts->police && tb[exts->police]) {
3396 			struct tc_action_ops *a_o;
3397 
3398 			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3399 			a_o = tc_action_load_ops(tb[exts->police], flags,
3400 						 extack);
3401 			if (IS_ERR(a_o))
3402 				return PTR_ERR(a_o);
3403 			act = tcf_action_init_1(net, tp, tb[exts->police],
3404 						rate_tlv, a_o, init_res, flags,
3405 						extack);
3406 			module_put(a_o->owner);
3407 			if (IS_ERR(act))
3408 				return PTR_ERR(act);
3409 
3410 			act->type = exts->type = TCA_OLD_COMPAT;
3411 			exts->actions[0] = act;
3412 			exts->nr_actions = 1;
3413 			tcf_idr_insert_many(exts->actions, init_res);
3414 		} else if (exts->action && tb[exts->action]) {
3415 			int err;
3416 
3417 			flags |= TCA_ACT_FLAGS_BIND;
3418 			err = tcf_action_init(net, tp, tb[exts->action],
3419 					      rate_tlv, exts->actions, init_res,
3420 					      &attr_size, flags, fl_flags,
3421 					      extack);
3422 			if (err < 0)
3423 				return err;
3424 			exts->nr_actions = err;
3425 		}
3426 	}
3427 #else
3428 	if ((exts->action && tb[exts->action]) ||
3429 	    (exts->police && tb[exts->police])) {
3430 		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3431 		return -EOPNOTSUPP;
3432 	}
3433 #endif
3434 
3435 	return 0;
3436 }
3437 EXPORT_SYMBOL(tcf_exts_validate_ex);
3438 
3439 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3440 		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3441 		      u32 flags, struct netlink_ext_ack *extack)
3442 {
3443 	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3444 				    flags, 0, extack);
3445 }
3446 EXPORT_SYMBOL(tcf_exts_validate);
3447 
3448 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3449 {
3450 #ifdef CONFIG_NET_CLS_ACT
3451 	struct tcf_exts old = *dst;
3452 
3453 	*dst = *src;
3454 	tcf_exts_destroy(&old);
3455 #endif
3456 }
3457 EXPORT_SYMBOL(tcf_exts_change);
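/* A minimal sketch of the init/validate/change sequence as seen from a
 * classifier's ->change() callback (TCA_FOO_* and the new/est names
 * stand in for the classifier's real attributes and locals):
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&new->exts, &e);
 */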
3458 
3459 #ifdef CONFIG_NET_CLS_ACT
3460 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3461 {
3462 	if (exts->nr_actions == 0)
3463 		return NULL;
3464 	else
3465 		return exts->actions[0];
3466 }
3467 #endif
3468 
3469 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3470 {
3471 #ifdef CONFIG_NET_CLS_ACT
3472 	struct nlattr *nest;
3473 
3474 	if (exts->action && tcf_exts_has_actions(exts)) {
3475 		/*
3476 		 * Again for backward-compatible mode: we want to work with
3477 		 * both the old and new modes of entering tc data, even if
3478 		 * iproute2 is newer. - jhs
3479 		 */
3480 		if (exts->type != TCA_OLD_COMPAT) {
3481 			nest = nla_nest_start_noflag(skb, exts->action);
3482 			if (nest == NULL)
3483 				goto nla_put_failure;
3484 
3485 			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3486 			    < 0)
3487 				goto nla_put_failure;
3488 			nla_nest_end(skb, nest);
3489 		} else if (exts->police) {
3490 			struct tc_action *act = tcf_exts_first_act(exts);
3491 			nest = nla_nest_start_noflag(skb, exts->police);
3492 			if (nest == NULL || !act)
3493 				goto nla_put_failure;
3494 			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3495 				goto nla_put_failure;
3496 			nla_nest_end(skb, nest);
3497 		}
3498 	}
3499 	return 0;
3500 
3501 nla_put_failure:
3502 	nla_nest_cancel(skb, nest);
3503 	return -1;
3504 #else
3505 	return 0;
3506 #endif
3507 }
3508 EXPORT_SYMBOL(tcf_exts_dump);
3509 
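/* Like tcf_exts_dump(), but requests a terse action dump that carries only a
 * minimal subset of each action's attributes, keeping large filter dumps
 * small.
 */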
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

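/* Copy the statistics of the first attached action into @skb; returns -1 if
 * the copy fails, 0 otherwise.
 */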
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

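/* Mark a filter as offloaded and account for it in the per-block counter.
 * The IN_HW flag ensures each filter bumps block->offloadcnt at most once,
 * no matter how many callbacks accepted it.
 */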
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

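/* Adjust the per-filter in_hw counter by @diff under tp->lock, toggling the
 * IN_HW flag and the block-wide offload count when the counter crosses zero.
 */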
static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

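/* Forget all offload state for a filter: clear the IN_HW flag, drop the block
 * offload count and zero the per-filter counter.
 */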
static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

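/* Run every callback registered on the block for @type. With @err_stop the
 * first callback error aborts the walk and is returned; otherwise errors are
 * ignored and the number of successful callbacks is returned.
 */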
static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

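/* Locked wrapper around __tc_setup_cb_call() that takes the block's cb_lock
 * and, when the block is bound to devices that require it, the rtnl lock.
 * A stats query from a classifier might look like this (illustrative sketch
 * only, not a caller in this file):
 *
 *	struct tc_cls_matchall_offload cls_mall = { ... };
 *
 *	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall,
 *			       false, true);
 */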
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

/* Non-destructive filter add. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block offloads counter. On failure,
 * the previously offloaded filter is considered intact and the offloads
 * counter is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block offload counter. On failure,
 * the previously offloaded filter is considered destroyed and the offload
 * counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy a filter and, if the filter was previously offloaded, decrement the
 * block offload counter.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

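/* Replay a single filter to one callback, e.g. when a callback is registered
 * on a block that already has filters installed. Offload counters are updated
 * just as they would be for a regular add or delete.
 */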
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

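/* Copy the user-supplied action cookie, if any, into the flow_action entry.
 * The cookie is read under RCU and duplicated with GFP_ATOMIC.
 */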
static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
				   const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	int err = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie) {
		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
							       user_cookie->len,
							       GFP_ATOMIC);
		if (!entry->user_cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->user_cookie);
}

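/* Release the per-entry resources (user cookies, destructors) taken while
 * building a flow_action with tc_setup_action() or tc_setup_offload_action().
 */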
void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_user_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);

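/* Translate a single tc action into one or more flow_action entries via the
 * action's offload_act_setup() callback; *index_inc is advanced by the number
 * of entries the action produced.
 */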
static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}

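/* Build a flow_action representation of @actions for offload. Each action may
 * expand to several entries (a pedit, for instance, contributes one entry per
 * key), and every entry inherits the action's hw stats type, hw index and
 * miss cookie.
 */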
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_user_cookie(entry, act);
		if (err)
			goto err_out_locked;

		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
			entry[k].cookie = (unsigned long)act;
			entry[k].miss_cookie =
				tcf_exts_miss_cookie_get(miss_cookie_base, i);
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}

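/* Exts-level wrapper around tc_setup_action() that supplies the filter's miss
 * cookie base (0 when no miss cookie node was allocated).
 */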
int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	u32 miss_cookie_base;

	if (!exts)
		return 0;

	miss_cookie_base = exts->miss_cookie_node ?
			   exts->miss_cookie_node->miss_cookie_base : 0;
	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
			       extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);

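/* Number of flow_action entries needed to represent @exts. This matches the
 * expansion done by tc_setup_action(): most actions count as one entry, while
 * a pedit action counts once per key.
 */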
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

#ifdef CONFIG_NET_CLS_ACT
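/* Qevents let a qdisc attach a classifier block at a point where it reacts to
 * some event (for example, the red qdisc's mark and early_drop events). The
 * helpers below manage such a block's lifetime and classify packets through
 * it.
 */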
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Reject attempts to attach a new block or switch to another one. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

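/* Classify @skb through the qevent's block. Returns the skb if it should
 * continue on its way, or NULL if a filter consumed or dropped it; in that
 * case *ret is set to the appropriate __NET_XMIT_* code. A call site in a
 * qdisc enqueue path might look like this (illustrative sketch only; the
 * qe_early_drop field name is hypothetical here):
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 */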
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
	 .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
	{.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
	{.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
	 .dumpit = tc_dump_chain},
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
	rtnl_register_many(tc_filter_rtnl_msg_handlers);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);