Lines matching full:block (identifier cross-reference hits, net/sched/cls_api.c)

181 struct tcf_block *block = chain->block; in tcf_proto_signal_destroying() local
183 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
184 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, in tcf_proto_signal_destroying()
186 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
205 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, in tcf_proto_exists_destroying()
220 struct tcf_block *block = chain->block; in tcf_proto_signal_destroyed() local
222 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
225 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
417 struct tcf_block *block = tp->chain->block; in tcf_proto_count_usesw() local
422 if (!atomic_dec_return(&block->useswcnt)) in tcf_proto_count_usesw()
436 if (counted && atomic_inc_return(&block->useswcnt) == 1) in tcf_proto_count_usesw()
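
The tcf_proto_count_usesw() hits above show a transition-counting idiom: only the 0->1 and 1->0 transitions of block->useswcnt touch the global static key that gates the software classification path. A minimal userspace sketch of that idiom, with C11 atomics standing in for the kernel atomics and a plain counter standing in for the static key (all names illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sw_enabled_users;     /* stands in for the static key */

struct blk {
        atomic_int useswcnt;
};

static void blk_count_usesw(struct blk *b, bool add)
{
        if (add) {
                /* 0 -> 1: this block gains its first software user */
                if (atomic_fetch_add(&b->useswcnt, 1) == 0)
                        atomic_fetch_add(&sw_enabled_users, 1);
        } else {
                /* 1 -> 0: the last software user of this block is gone */
                if (atomic_fetch_sub(&b->useswcnt, 1) == 1)
                        atomic_fetch_sub(&sw_enabled_users, 1);
        }
}

Keying the global toggle to the per-block edge transitions keeps the common case to a single atomic operation per filter change.
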
489 #define ASSERT_BLOCK_LOCKED(block) \ argument
490 lockdep_assert_held(&(block)->lock)
498 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, in tcf_chain_create() argument
503 ASSERT_BLOCK_LOCKED(block); in tcf_chain_create()
508 list_add_tail_rcu(&chain->list, &block->chain_list); in tcf_chain_create()
510 chain->block = block; in tcf_chain_create()
514 block->chain0.chain = chain; in tcf_chain_create()
529 struct tcf_block *block = chain->block; in tcf_chain0_head_change() local
534 mutex_lock(&block->lock); in tcf_chain0_head_change()
535 list_for_each_entry(item, &block->chain0.filter_chain_list, list) in tcf_chain0_head_change()
537 mutex_unlock(&block->lock); in tcf_chain0_head_change()
540 /* Returns true if block can be safely freed. */
544 struct tcf_block *block = chain->block; in tcf_chain_detach() local
546 ASSERT_BLOCK_LOCKED(block); in tcf_chain_detach()
550 block->chain0.chain = NULL; in tcf_chain_detach()
552 if (list_empty(&block->chain_list) && in tcf_chain_detach()
553 refcount_read(&block->refcnt) == 0) in tcf_chain_detach()
559 static void tcf_block_destroy(struct tcf_block *block) in tcf_block_destroy() argument
561 mutex_destroy(&block->lock); in tcf_block_destroy()
562 mutex_destroy(&block->proto_destroy_lock); in tcf_block_destroy()
563 xa_destroy(&block->ports); in tcf_block_destroy()
564 kfree_rcu(block, rcu); in tcf_block_destroy()
569 struct tcf_block *block = chain->block; in tcf_chain_destroy() local
574 tcf_block_destroy(block); in tcf_chain_destroy()
579 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_hold()
586 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_held_by_acts_only()
594 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, in tcf_chain_lookup() argument
599 ASSERT_BLOCK_LOCKED(block); in tcf_chain_lookup()
601 list_for_each_entry(chain, &block->chain_list, list) { in tcf_chain_lookup()
609 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, in tcf_chain_lookup_rcu() argument
614 list_for_each_entry_rcu(chain, &block->chain_list, list) { in tcf_chain_lookup_rcu()
626 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, in __tcf_chain_get() argument
633 mutex_lock(&block->lock); in __tcf_chain_get()
634 chain = tcf_chain_lookup(block, chain_index); in __tcf_chain_get()
640 chain = tcf_chain_create(block, chain_index); in __tcf_chain_get()
648 mutex_unlock(&block->lock); in __tcf_chain_get()
662 mutex_unlock(&block->lock); in __tcf_chain_get()
666 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, in tcf_chain_get() argument
669 return __tcf_chain_get(block, chain_index, create, false); in tcf_chain_get()
672 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) in tcf_chain_get_by_act() argument
674 return __tcf_chain_get(block, chain_index, true, true); in tcf_chain_get_by_act()
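
The __tcf_chain_get() hits above follow a common lookup-or-create shape: search under block->lock, create the chain on demand, and take a reference before the lock is dropped so the chain cannot disappear between lookup and use. A hedged sketch of that shape, assuming a simple singly linked chain list, with a pthread mutex standing in for the kernel mutex (illustrative names throughout):

#include <pthread.h>
#include <stdlib.h>

struct chain {
        struct chain *next;
        unsigned int index;
        unsigned int refcnt;
};

struct blk {
        pthread_mutex_t lock;
        struct chain *chains;
};

static struct chain *chain_get(struct blk *b, unsigned int index, int create)
{
        struct chain *c;

        pthread_mutex_lock(&b->lock);
        for (c = b->chains; c; c = c->next)
                if (c->index == index)
                        goto hold;
        if (!create)
                goto out;               /* lookup-only: report absent */
        c = calloc(1, sizeof(*c));
        if (!c)
                goto out;
        c->index = index;
        c->next = b->chains;
        b->chains = c;
hold:
        c->refcnt++;                    /* reference taken while locked */
out:
        pthread_mutex_unlock(&b->lock);
        return c;
}
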
682 struct tcf_block *block, struct sk_buff *oskb,
688 struct tcf_block *block = chain->block; in __tcf_chain_put() local
694 mutex_lock(&block->lock); in __tcf_chain_put()
697 mutex_unlock(&block->lock); in __tcf_chain_put()
706 /* tc_chain_notify_delete can't be called while holding block lock. in __tcf_chain_put()
707 * However, when block is unlocked chain can be changed concurrently, so in __tcf_chain_put()
718 chain->index, block, NULL, 0, 0); in __tcf_chain_put()
725 mutex_unlock(&block->lock); in __tcf_chain_put()
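
The comment inside __tcf_chain_put() above records a real constraint: the delete notification must not run under block->lock, yet once the lock is dropped the chain may change or be freed concurrently, so whatever the notification needs is snapshotted while still locked. A minimal sketch of that snapshot-then-notify ordering (notify_chain_delete() is a hypothetical placeholder; pthread mutex as stand-in):

#include <pthread.h>

struct chain {
        pthread_mutex_t *block_lock;    /* the owning block's lock */
        unsigned int index;
        unsigned int refcnt;
};

/* hypothetical placeholder for an RTM_DELCHAIN-style notification */
static void notify_chain_delete(unsigned int index)
{
        (void)index;
}

static void chain_put(struct chain *c)
{
        pthread_mutex_t *lock = c->block_lock;
        unsigned int index;
        int last;

        pthread_mutex_lock(lock);
        last = (--c->refcnt == 0);
        index = c->index;               /* snapshot while still locked */
        pthread_mutex_unlock(lock);

        /* from here the chain may be modified or freed concurrently;
         * only the snapshotted values are safe to use
         */
        if (last)
                notify_chain_delete(index);
}
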
773 static int tcf_block_setup(struct tcf_block *block,
786 bo->block = flow_block; in tcf_block_offload_init()
794 static void tcf_block_unbind(struct tcf_block *block,
799 struct tcf_block *block = block_cb->indr.data; in tc_block_indr_cleanup() local
807 &block->flow_block, tcf_block_shared(block), in tc_block_indr_cleanup()
810 down_write(&block->cb_lock); in tc_block_indr_cleanup()
813 tcf_block_unbind(block, &bo); in tc_block_indr_cleanup()
814 up_write(&block->cb_lock); in tc_block_indr_cleanup()
818 static bool tcf_block_offload_in_use(struct tcf_block *block) in tcf_block_offload_in_use() argument
820 return atomic_read(&block->offloadcnt); in tcf_block_offload_in_use()
823 static int tcf_block_offload_cmd(struct tcf_block *block, in tcf_block_offload_cmd() argument
832 &block->flow_block, tcf_block_shared(block), in tcf_block_offload_cmd()
845 return tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
848 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, in tcf_block_offload_cmd()
850 tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
855 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_bind() argument
862 down_write(&block->cb_lock); in tcf_block_offload_bind()
864 /* If tc offload feature is disabled and the block we try to bind in tcf_block_offload_bind()
869 tcf_block_offload_in_use(block)) { in tcf_block_offload_bind()
870 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); in tcf_block_offload_bind()
875 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
881 up_write(&block->cb_lock); in tcf_block_offload_bind()
885 if (tcf_block_offload_in_use(block)) in tcf_block_offload_bind()
889 block->nooffloaddevcnt++; in tcf_block_offload_bind()
891 up_write(&block->cb_lock); in tcf_block_offload_bind()
895 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_unbind() argument
901 down_write(&block->cb_lock); in tcf_block_offload_unbind()
902 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); in tcf_block_offload_unbind()
905 up_write(&block->cb_lock); in tcf_block_offload_unbind()
909 WARN_ON(block->nooffloaddevcnt-- == 0); in tcf_block_offload_unbind()
910 up_write(&block->cb_lock); in tcf_block_offload_unbind()
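
tcf_block_offload_bind()/tcf_block_offload_unbind() above take block->cb_lock for writing around the FLOW_BLOCK_BIND/UNBIND commands, and a device that cannot offload is tolerated only while the block has no offloaded filters, in which case it is tallied in nooffloaddevcnt. A rough userspace sketch of that bookkeeping (pthread rwlock in place of the kernel rwsem; names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct blk {
        pthread_rwlock_t cb_lock;
        atomic_int offloadcnt;          /* filters currently in hardware */
        unsigned int nooffloaddevcnt;   /* bound devs lacking offload */
};

static int offload_bind(struct blk *b, int dev_can_offload)
{
        int err = 0;

        pthread_rwlock_wrlock(&b->cb_lock);
        if (!dev_can_offload) {
                if (atomic_load(&b->offloadcnt))
                        err = -EOPNOTSUPP;  /* offloaded filters exist */
                else
                        b->nooffloaddevcnt++;
        }
        pthread_rwlock_unlock(&b->cb_lock);
        return err;
}

static void offload_unbind(struct blk *b, int dev_can_offload)
{
        pthread_rwlock_wrlock(&b->cb_lock);
        if (!dev_can_offload)
                b->nooffloaddevcnt--;   /* kernel WARNs on underflow */
        pthread_rwlock_unlock(&b->cb_lock);
}
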
914 tcf_chain0_head_change_cb_add(struct tcf_block *block, in tcf_chain0_head_change_cb_add() argument
929 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
930 chain0 = block->chain0.chain; in tcf_chain0_head_change_cb_add()
934 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
935 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
946 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
947 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
948 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
958 tcf_chain0_head_change_cb_del(struct tcf_block *block, in tcf_chain0_head_change_cb_del() argument
963 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_del()
964 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { in tcf_chain0_head_change_cb_del()
968 if (block->chain0.chain) in tcf_chain0_head_change_cb_del()
971 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
977 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
988 static int tcf_block_insert(struct tcf_block *block, struct net *net, in tcf_block_insert() argument
996 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, in tcf_block_insert()
1004 static void tcf_block_remove(struct tcf_block *block, struct net *net) in tcf_block_remove() argument
1009 idr_remove(&tn->idr, block->index); in tcf_block_remove()
1017 struct tcf_block *block; in tcf_block_create() local
1019 block = kzalloc(sizeof(*block), GFP_KERNEL); in tcf_block_create()
1020 if (!block) { in tcf_block_create()
1021 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); in tcf_block_create()
1024 mutex_init(&block->lock); in tcf_block_create()
1025 mutex_init(&block->proto_destroy_lock); in tcf_block_create()
1026 init_rwsem(&block->cb_lock); in tcf_block_create()
1027 flow_block_init(&block->flow_block); in tcf_block_create()
1028 INIT_LIST_HEAD(&block->chain_list); in tcf_block_create()
1029 INIT_LIST_HEAD(&block->owner_list); in tcf_block_create()
1030 INIT_LIST_HEAD(&block->chain0.filter_chain_list); in tcf_block_create()
1032 refcount_set(&block->refcnt, 1); in tcf_block_create()
1033 block->net = net; in tcf_block_create()
1034 block->index = block_index; in tcf_block_create()
1035 xa_init(&block->ports); in tcf_block_create()
1038 if (!tcf_block_shared(block)) in tcf_block_create()
1039 block->q = q; in tcf_block_create()
1040 return block; in tcf_block_create()
1053 struct tcf_block *block; in tcf_block_refcnt_get() local
1056 block = tcf_block_lookup(net, block_index); in tcf_block_refcnt_get()
1057 if (block && !refcount_inc_not_zero(&block->refcnt)) in tcf_block_refcnt_get()
1058 block = NULL; in tcf_block_refcnt_get()
1061 return block; in tcf_block_refcnt_get()
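
tcf_block_refcnt_get() above pairs the lookup with refcount_inc_not_zero(): a block whose refcount has already hit zero is mid-teardown and must not be resurrected, so the lookup reports it as absent instead. A sketch of the conditional-increment primitive in C11 atomics (a stand-in for the kernel helper, not its implementation):

#include <stdatomic.h>
#include <stdbool.h>

static bool refcnt_inc_not_zero(atomic_uint *r)
{
        unsigned int old = atomic_load(r);

        while (old != 0) {
                if (atomic_compare_exchange_weak(r, &old, old + 1))
                        return true;    /* reference taken */
                /* CAS failure reloaded 'old'; retry */
        }
        return false;                   /* already dying: treat as absent */
}

Note how both branches of __tcf_block_find() below return a referenced block, so callers can release it uniformly via tcf_block_release().
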
1065 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in __tcf_get_next_chain() argument
1067 mutex_lock(&block->lock); in __tcf_get_next_chain()
1069 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
1072 chain = list_first_entry_or_null(&block->chain_list, in __tcf_get_next_chain()
1077 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
1082 mutex_unlock(&block->lock); in __tcf_get_next_chain()
1088 * block. It properly obtains block->lock and takes reference to chain before
1097 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in tcf_get_next_chain() argument
1099 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); in tcf_get_next_chain()
1161 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) in tcf_block_flush_all_chains() argument
1165 /* Last reference to block. At this point chains cannot be added or in tcf_block_flush_all_chains()
1168 for (chain = tcf_get_next_chain(block, NULL); in tcf_block_flush_all_chains()
1170 chain = tcf_get_next_chain(block, chain)) { in tcf_block_flush_all_chains()
1280 struct tcf_block *block; in __tcf_block_find() local
1283 block = tcf_block_refcnt_get(net, block_index); in __tcf_block_find()
1284 if (!block) { in __tcf_block_find()
1285 NL_SET_ERR_MSG(extack, "Block of given index was not found"); in __tcf_block_find()
1291 block = cops->tcf_block(q, cl, extack); in __tcf_block_find()
1292 if (!block) in __tcf_block_find()
1295 if (tcf_block_shared(block)) { in __tcf_block_find()
1296 …NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the … in __tcf_block_find()
1300 /* Always take reference to block in order to support execution in __tcf_block_find()
1302 * must release block when it is finished using it. 'if' block in __tcf_block_find()
1303 * of this conditional obtain reference to block by calling in __tcf_block_find()
1306 refcount_inc(&block->refcnt); in __tcf_block_find()
1309 return block; in __tcf_block_find()
1312 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, in __tcf_block_put() argument
1315 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { in __tcf_block_put()
1316 /* Flushing/putting all chains will cause the block to be in __tcf_block_put()
1318 * is empty, block has to be manually deallocated. After block in __tcf_block_put()
1320 * increment it or add new chains to block. in __tcf_block_put()
1322 bool free_block = list_empty(&block->chain_list); in __tcf_block_put()
1324 mutex_unlock(&block->lock); in __tcf_block_put()
1325 if (tcf_block_shared(block)) in __tcf_block_put()
1326 tcf_block_remove(block, block->net); in __tcf_block_put()
1329 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1332 tcf_block_destroy(block); in __tcf_block_put()
1334 tcf_block_flush_all_chains(block, rtnl_held); in __tcf_block_put()
1336 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1340 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) in tcf_block_refcnt_put() argument
1342 __tcf_block_put(block, NULL, NULL, rtnl_held); in tcf_block_refcnt_put()
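
__tcf_block_put() above gates teardown with refcount_dec_and_mutex_lock(): drop a reference and, only if it was the last one, return with block->lock held so chains can be flushed with no new user able to appear. A userspace approximation of that primitive (C11 atomics plus a pthread mutex; assumes the caller holds a reference, as refcounting requires):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool refcnt_dec_and_mutex_lock(atomic_uint *r, pthread_mutex_t *lock)
{
        unsigned int old = atomic_load(r);

        /* fast path: clearly not the last reference, no lock needed */
        while (old > 1) {
                if (atomic_compare_exchange_weak(r, &old, old - 1))
                        return false;
        }
        /* possibly last: take the lock first, then drop the reference,
         * so a true return hands the caller an exclusively locked object
         */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(r, 1) == 1)
                return true;            /* last ref; lock stays held */
        pthread_mutex_unlock(lock);
        return false;
}

tcf_chain_detach() above makes the complementary check: a block whose chain_list is empty and whose refcount reads zero is freed by whoever removes the last chain.
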
1345 /* Find tcf block.
1354 struct tcf_block *block; in tcf_block_find() local
1367 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); in tcf_block_find()
1368 if (IS_ERR(block)) { in tcf_block_find()
1369 err = PTR_ERR(block); in tcf_block_find()
1373 return block; in tcf_block_find()
1383 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, in tcf_block_release() argument
1386 if (!IS_ERR_OR_NULL(block)) in tcf_block_release()
1387 tcf_block_refcnt_put(block, rtnl_held); in tcf_block_release()
1404 tcf_block_owner_netif_keep_dst(struct tcf_block *block, in tcf_block_owner_netif_keep_dst() argument
1408 if (block->keep_dst && in tcf_block_owner_netif_keep_dst()
1414 void tcf_block_netif_keep_dst(struct tcf_block *block) in tcf_block_netif_keep_dst() argument
1418 block->keep_dst = true; in tcf_block_netif_keep_dst()
1419 list_for_each_entry(item, &block->owner_list, list) in tcf_block_netif_keep_dst()
1420 tcf_block_owner_netif_keep_dst(block, item->q, in tcf_block_netif_keep_dst()
1425 static int tcf_block_owner_add(struct tcf_block *block, in tcf_block_owner_add() argument
1436 list_add(&item->list, &block->owner_list); in tcf_block_owner_add()
1440 static void tcf_block_owner_del(struct tcf_block *block, in tcf_block_owner_del() argument
1446 list_for_each_entry(item, &block->owner_list, list) { in tcf_block_owner_del()
1456 static bool tcf_block_tracks_dev(struct tcf_block *block, in tcf_block_tracks_dev() argument
1459 return tcf_block_shared(block) && in tcf_block_tracks_dev()
1470 struct tcf_block *block = NULL; in tcf_block_get_ext() local
1474 /* block_index not 0 means the shared block is requested */ in tcf_block_get_ext()
1475 block = tcf_block_refcnt_get(net, ei->block_index); in tcf_block_get_ext()
1477 if (!block) { in tcf_block_get_ext()
1478 block = tcf_block_create(net, q, ei->block_index, extack); in tcf_block_get_ext()
1479 if (IS_ERR(block)) in tcf_block_get_ext()
1480 return PTR_ERR(block); in tcf_block_get_ext()
1481 if (tcf_block_shared(block)) { in tcf_block_get_ext()
1482 err = tcf_block_insert(block, net, extack); in tcf_block_get_ext()
1488 err = tcf_block_owner_add(block, q, ei->binder_type); in tcf_block_get_ext()
1492 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); in tcf_block_get_ext()
1494 err = tcf_chain0_head_change_cb_add(block, ei, extack); in tcf_block_get_ext()
1498 err = tcf_block_offload_bind(block, q, ei, extack); in tcf_block_get_ext()
1502 if (tcf_block_tracks_dev(block, ei)) { in tcf_block_get_ext()
1503 err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL); in tcf_block_get_ext()
1505 NL_SET_ERR_MSG(extack, "block dev insert failed"); in tcf_block_get_ext()
1510 *p_block = block; in tcf_block_get_ext()
1514 tcf_block_offload_unbind(block, q, ei); in tcf_block_get_ext()
1516 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_get_ext()
1518 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_get_ext()
1521 tcf_block_refcnt_put(block, true); in tcf_block_get_ext()
1550 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, in tcf_block_put_ext() argument
1555 if (!block) in tcf_block_put_ext()
1557 if (tcf_block_tracks_dev(block, ei)) in tcf_block_put_ext()
1558 xa_erase(&block->ports, dev->ifindex); in tcf_block_put_ext()
1559 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_put_ext()
1560 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_put_ext()
1562 __tcf_block_put(block, q, ei, true); in tcf_block_put_ext()
1566 void tcf_block_put(struct tcf_block *block) in tcf_block_put() argument
1570 if (!block) in tcf_block_put()
1572 tcf_block_put_ext(block, block->q, &ei); in tcf_block_put()
1578 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, in tcf_block_playback_offloads() argument
1586 lockdep_assert_held(&block->cb_lock); in tcf_block_playback_offloads()
1588 for (chain = __tcf_get_next_chain(block, NULL); in tcf_block_playback_offloads()
1591 chain = __tcf_get_next_chain(block, chain), in tcf_block_playback_offloads()
1621 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, in tcf_block_playback_offloads()
1626 static int tcf_block_bind(struct tcf_block *block, in tcf_block_bind() argument
1632 lockdep_assert_held(&block->cb_lock); in tcf_block_bind()
1635 err = tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1637 tcf_block_offload_in_use(block), in tcf_block_bind()
1642 block->lockeddevcnt++; in tcf_block_bind()
1646 list_splice(&bo->cb_list, &block->flow_block.cb_list); in tcf_block_bind()
1655 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1657 tcf_block_offload_in_use(block), in tcf_block_bind()
1660 block->lockeddevcnt--; in tcf_block_bind()
1668 static void tcf_block_unbind(struct tcf_block *block, in tcf_block_unbind() argument
1673 lockdep_assert_held(&block->cb_lock); in tcf_block_unbind()
1676 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_unbind()
1678 tcf_block_offload_in_use(block), in tcf_block_unbind()
1683 block->lockeddevcnt--; in tcf_block_unbind()
1687 static int tcf_block_setup(struct tcf_block *block, in tcf_block_setup() argument
1694 err = tcf_block_bind(block, bo); in tcf_block_setup()
1698 tcf_block_unbind(block, bo); in tcf_block_setup()
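
tcf_block_setup() dispatches to tcf_block_bind()/tcf_block_unbind(), and the bind path above replays existing offloads to each newly attached callback, unwinding every callback already processed if one of them fails. A compact sketch of that replay-with-rollback shape (an illustrative callback list, not the flow_block_cb API):

#include <stddef.h>

struct cb {
        struct cb *next;
        int (*replay)(void *priv, int add);     /* replay offloads */
        void *priv;
};

static int block_bind(struct cb *newcbs)
{
        struct cb *c, *failed = NULL;
        int err = 0;

        for (c = newcbs; c; c = c->next) {
                err = c->replay(c->priv, 1);    /* offer existing rules */
                if (err) {
                        failed = c;
                        break;
                }
        }
        if (!failed)
                return 0;

        /* unwind every callback bound before the failure */
        for (c = newcbs; c != failed; c = c->next)
                c->replay(c->priv, 0);
        return err;
}
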
1792 tp->chain->block->index, in __tcf_classify()
1806 const struct tcf_block *block, in tcf_classify() argument
1823 if (block) { in tcf_classify()
1844 fchain = tcf_chain_lookup_rcu(block, chain); in tcf_classify()
2051 struct tcf_proto *tp, struct tcf_block *block, in tcf_fill_node() argument
2074 tcm->tcm_block_index = block->index; in tcf_fill_node()
2117 struct tcf_block *block, in tfilter_notify_prep() argument
2132 ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_notify_prep()
2148 struct tcf_block *block, struct Qdisc *q, in tfilter_notify() argument
2159 skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event, in tfilter_notify()
2174 struct tcf_block *block, struct Qdisc *q, in tfilter_del_notify() argument
2185 skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, in tfilter_del_notify()
2207 struct tcf_block *block, struct Qdisc *q, in tfilter_notify_chain() argument
2216 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, in tfilter_notify_chain()
2246 struct tcf_block *block; in tc_new_tfilter() local
2270 block = NULL; in tc_new_tfilter()
2301 * block is shared (no qdisc found), qdisc is not unlocked, classifier in tc_new_tfilter()
2315 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_new_tfilter()
2317 if (IS_ERR(block)) { in tc_new_tfilter()
2318 err = PTR_ERR(block); in tc_new_tfilter()
2321 block->classid = parent; in tc_new_tfilter()
2329 chain = tcf_chain_get(block, chain_index, true); in tc_new_tfilter()
2426 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_new_tfilter()
2445 tcf_block_release(q, block, rtnl_held); in tc_new_tfilter()
2479 struct tcf_block *block = NULL; in tc_del_tfilter() local
2512 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc in tc_del_tfilter()
2527 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_del_tfilter()
2529 if (IS_ERR(block)) { in tc_del_tfilter()
2530 err = PTR_ERR(block); in tc_del_tfilter()
2540 chain = tcf_chain_get(block, chain_index, false); in tc_del_tfilter()
2555 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_del_tfilter()
2582 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_del_tfilter()
2597 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh, in tc_del_tfilter()
2612 tcf_block_release(q, block, rtnl_held); in tc_del_tfilter()
2638 struct tcf_block *block = NULL; in tc_get_tfilter() local
2671 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not in tc_get_tfilter()
2685 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_get_tfilter()
2687 if (IS_ERR(block)) { in tc_get_tfilter()
2688 err = PTR_ERR(block); in tc_get_tfilter()
2698 chain = tcf_chain_get(block, chain_index, false); in tc_get_tfilter()
2728 err = tfilter_notify(net, skb, n, tp, block, q, parent, in tc_get_tfilter()
2741 tcf_block_release(q, block, rtnl_held); in tc_get_tfilter()
2753 struct tcf_block *block; member
2764 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, in tcf_node_dump()
2775 struct tcf_block *block = chain->block; in tcf_chain_dump() local
2798 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, in tcf_chain_dump()
2810 arg.block = block; in tcf_chain_dump()
2843 struct tcf_block *block; in tc_dump_tfilter() local
2867 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_tfilter()
2868 if (!block) in tc_dump_tfilter()
2870 /* If we work with block index, q is NULL and parent value in tc_dump_tfilter()
2903 block = cops->tcf_block(q, cl, NULL); in tc_dump_tfilter()
2904 if (!block) in tc_dump_tfilter()
2906 parent = block->classid; in tc_dump_tfilter()
2907 if (tcf_block_shared(block)) in tc_dump_tfilter()
2914 for (chain = __tcf_get_next_chain(block, NULL); in tc_dump_tfilter()
2917 chain = __tcf_get_next_chain(block, chain), in tc_dump_tfilter()
2931 tcf_block_refcnt_put(block, true); in tc_dump_tfilter()
2944 struct tcf_block *block, in tc_chain_fill_node() argument
2965 if (block->q) { in tc_chain_fill_node()
2966 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; in tc_chain_fill_node()
2967 tcm->tcm_parent = block->q->handle; in tc_chain_fill_node()
2970 tcm->tcm_block_index = block->index; in tc_chain_fill_node()
3002 struct tcf_block *block = chain->block; in tc_chain_notify() local
3003 struct net *net = block->net; in tc_chain_notify()
3015 chain->index, net, skb, block, portid, in tc_chain_notify()
3032 struct tcf_block *block, struct sk_buff *oskb, in tc_chain_notify_delete() argument
3036 struct net *net = block->net; in tc_chain_notify_delete()
3047 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { in tc_chain_notify_delete()
3115 struct tcf_block *block; in tc_ctl_chain() local
3130 block = tcf_block_find(net, &q, &parent, &cl, in tc_ctl_chain()
3132 if (IS_ERR(block)) in tc_ctl_chain()
3133 return PTR_ERR(block); in tc_ctl_chain()
3142 mutex_lock(&block->lock); in tc_ctl_chain()
3143 chain = tcf_chain_lookup(block, chain_index); in tc_ctl_chain()
3162 chain = tcf_chain_create(block, chain_index); in tc_ctl_chain()
3179 /* Modifying chain requires holding parent block lock. In case in tc_ctl_chain()
3187 mutex_unlock(&block->lock); in tc_ctl_chain()
3201 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_ctl_chain()
3225 tcf_block_release(q, block, true); in tc_ctl_chain()
3232 mutex_unlock(&block->lock); in tc_ctl_chain()
3242 struct tcf_block *block; in tc_dump_chain() local
3258 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_chain()
3259 if (!block) in tc_dump_chain()
3287 block = cops->tcf_block(q, cl, NULL); in tc_dump_chain()
3288 if (!block) in tc_dump_chain()
3290 if (tcf_block_shared(block)) in tc_dump_chain()
3297 mutex_lock(&block->lock); in tc_dump_chain()
3298 list_for_each_entry(chain, &block->chain_list, list) { in tc_dump_chain()
3309 chain->index, net, skb, block, in tc_dump_chain()
3317 mutex_unlock(&block->lock); in tc_dump_chain()
3320 tcf_block_refcnt_put(block, true); in tc_dump_chain()
3547 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) in tcf_block_offload_inc() argument
3552 atomic_inc(&block->offloadcnt); in tcf_block_offload_inc()
3555 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) in tcf_block_offload_dec() argument
3560 atomic_dec(&block->offloadcnt); in tcf_block_offload_dec()
3563 static void tc_cls_offload_cnt_update(struct tcf_block *block, in tc_cls_offload_cnt_update() argument
3567 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_update()
3572 tcf_block_offload_inc(block, flags); in tc_cls_offload_cnt_update()
3577 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_update()
3583 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, in tc_cls_offload_cnt_reset() argument
3586 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_reset()
3589 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_reset()
3595 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in __tc_setup_cb_call() argument
3602 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { in __tc_setup_cb_call()
3614 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in tc_setup_cb_call() argument
3617 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_call()
3623 down_read(&block->cb_lock); in tc_setup_cb_call()
3624 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_call()
3625 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_call()
3628 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_call()
3629 up_read(&block->cb_lock); in tc_setup_cb_call()
3634 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_call()
3636 up_read(&block->cb_lock); in tc_setup_cb_call()
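
tc_setup_cb_call() above (and its add/replace/destroy siblings further down) resolve a lock-ordering problem: rtnl must be taken before block->cb_lock, yet whether rtnl is needed depends on block->lockeddevcnt, which can change while nothing is held. Hence the pattern of re-checking after acquiring cb_lock and retrying with rtnl held. A sketch with pthread locks standing in for rtnl_lock() and the cb_lock rwsem (names illustrative):

#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

struct blk {
        pthread_rwlock_t cb_lock;
        unsigned int lockeddevcnt;      /* bound devs that require rtnl */
};

static int setup_cb_call(struct blk *b, int rtnl_held)
{
        int take_rtnl = !rtnl_held && b->lockeddevcnt;

retry:
        if (take_rtnl)
                pthread_mutex_lock(&rtnl);
        pthread_rwlock_rdlock(&b->cb_lock);
        /* lockeddevcnt may have become nonzero while nothing was held:
         * back out and retry in the correct lock order
         */
        if (!rtnl_held && !take_rtnl && b->lockeddevcnt) {
                pthread_rwlock_unlock(&b->cb_lock);
                take_rtnl = 1;
                goto retry;
        }

        /* ... invoke the registered block callbacks here ... */

        pthread_rwlock_unlock(&b->cb_lock);
        if (take_rtnl)
                pthread_mutex_unlock(&rtnl);
        return 0;
}
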
3644 * successfully offloaded, increment block offloads counter. On failure,
3649 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_add() argument
3653 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_add()
3659 down_read(&block->cb_lock); in tc_setup_cb_add()
3660 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_add()
3661 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_add()
3664 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_add()
3665 up_read(&block->cb_lock); in tc_setup_cb_add()
3670 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_add()
3671 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_add()
3676 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_add()
3683 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, in tc_setup_cb_add()
3686 up_read(&block->cb_lock); in tc_setup_cb_add()
3694 * successfully offloaded, increment block offload counter. On failure,
3699 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_replace() argument
3705 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_replace()
3711 down_read(&block->cb_lock); in tc_setup_cb_replace()
3712 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_replace()
3713 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_replace()
3716 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_replace()
3717 up_read(&block->cb_lock); in tc_setup_cb_replace()
3722 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_replace()
3723 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_replace()
3728 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); in tc_setup_cb_replace()
3732 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_replace()
3739 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, in tc_setup_cb_replace()
3742 up_read(&block->cb_lock); in tc_setup_cb_replace()
3749 /* Destroy filter and decrement block offload counter, if filter was previously
3753 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_destroy() argument
3757 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_destroy()
3763 down_read(&block->cb_lock); in tc_setup_cb_destroy()
3764 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_destroy()
3765 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_destroy()
3768 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_destroy()
3769 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3774 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_destroy()
3776 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); in tc_setup_cb_destroy()
3780 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3787 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_reoffload() argument
3798 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, in tc_setup_cb_reoffload()
3957 NL_SET_ERR_MSG(extack, "Block number may not be zero"); in tcf_qevent_parse_block_index()
3984 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); in tcf_qevent_init()
3991 tcf_block_put_ext(qe->block, sch, &qe->info); in tcf_qevent_destroy()
4008 /* Bounce newly-configured block or change in block. */ in tcf_qevent_validate_change()