// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32bit miss_cookie_base +
 * action index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
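/* Worked example (illustrative helper, not part of this file): a 64-bit
 * miss cookie reported back by hardware decomposes into the xarray base
 * and the action index through the union above, mirroring
 * tcf_exts_miss_cookie_lookup() below.
 */
static inline void example_miss_cookie_decode(u64 cookie, u32 *base,
					      int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = cookie, };

	*base = mc.miss_cookie_base;
	*act_index = mc.act_index;
}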
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err < 0)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}
static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}
static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}
#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif
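/* Usage sketch (hypothetical caller, not from this file): a feature that
 * stamps tc skb extensions bumps the static key while it is bound, so
 * the tc_skb_ext_tc_enabled() fast-path check stays patched out whenever
 * no user needs it:
 *
 *	tc_skb_ext_tc_enable();		// on offload bind
 *	tc_skb_ext_tc_disable();	// on unbind
 */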
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}
static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}
static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}
/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}
/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
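/* Registration sketch (hypothetical classifier module; the mandatory ops
 * callbacks a real classifier must provide are omitted for brevity):
 *
 *	static struct tcf_proto_ops example_cls_ops __read_mostly = {
 *		.kind	= "example",
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_cls_init(void)
 *	{
 *		return register_tcf_proto_ops(&example_cls_ops);
 *	}
 */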
static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
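/* Usage sketch: classifiers typically embed a struct rcu_work in each
 * filter and free it through tcf_queue_work(), so the free runs on
 * tc_filter_wq only after an RCU grace period (hypothetical filter
 * struct for illustration):
 *
 *	static void example_filter_free_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *		kfree(f);
 *	}
 *
 *	// on delete: tcf_queue_work(&f->rwork, example_filter_free_work);
 */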
/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
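/* Worked example: priorities live in the major half of the handle. On an
 * empty chain, first = TC_H_MAKE(0xC0000000U, 0U), so the allocated prio
 * is 0xC0000000. If the head filter already has prio 0x80010000, then
 * first = 0x8000FFFF and TC_H_MAJ() masks it down to 0x80000000, i.e.
 * one major step below the existing head.
 */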
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}
static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}
static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}
static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}
static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}
static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}
#define ASSERT_BLOCK_LOCKED(block) \
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}
static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}
/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}
static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}
static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}
static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}
static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}
static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}
static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
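/* Usage sketch: an action that jumps to a chain pins it for the action's
 * lifetime, independent of user-visible references (hypothetical
 * setup/teardown pair):
 *
 *	chain = tcf_chain_get_by_act(block, params->chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	// later, on action destroy:
 *	tcf_chain_put_by_act(chain);
 */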
static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}
static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}
static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}
static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}
static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}
static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}
static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}
/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
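/* Iteration sketch (illustrative helper, not part of this file): the
 * get/put pairing above lets a caller walk all user-visible chains
 * without holding block->lock across the loop body.
 */
static inline void example_walk_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	for (chain = tcf_get_next_chain(block, NULL); chain;
	     chain = tcf_get_next_chain(block, chain))
		pr_debug("chain %u\n", chain->index);
}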
static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}
/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that all netlink dump callbacks cannot guarantee to provide
 * consistent dump because rtnl lock is released each time skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
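/* Same pattern one level down (illustrative helper): walking every
 * classifier on a chain; the iterator drops the previous tp's reference
 * at each step, so no manual put is needed on normal loop exit.
 */
static inline void example_walk_protos(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL); tp;
	     tp = tcf_get_next_proto(chain, tp))
		pr_debug("kind %s prio %u\n", tp->ops->kind, tp->prio >> 16);
}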
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}
/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}
static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}
/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}
static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}
struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};
static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);
static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
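/* Usage sketch (hypothetical classful qdisc): a qdisc obtains its block
 * in ->init and releases it in ->destroy; the default head change
 * callback keeps the qdisc's filter list pointing at chain 0's head:
 *
 *	static int example_qdisc_init(struct Qdisc *sch,
 *				      struct netlink_ext_ack *extack)
 *	{
 *		struct example_qdisc_priv *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	// in ->destroy: tcf_block_put(q->block);
 */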
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}
static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp, chain, exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts))
				return TC_ACT_SHOT;

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts))
				return TC_ACT_SHOT;

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n))
		return TC_ACT_SHOT;

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n)
					return TC_ACT_SHOT;

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext))
				return TC_ACT_SHOT;
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);
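/* Usage sketch (hypothetical qdisc classify step, run under the RCU BH
 * read side as on the qdisc fast path; find_class/default_class are
 * illustrative names, not real helpers):
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, NULL, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return NULL;			// drop
 *	case TC_ACT_UNSPEC:
 *		return default_class;		// no match, qdisc default
 *	default:
 *		return find_class(q, res.classid);
 *	}
 */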
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}
static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}
static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
			       event, false, true, extack);
}
static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static bool is_qdisc_ingress(__u32 classid)
{
	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	if (is_qdisc_ingress(parent))
		flags |= TCA_ACT_FLAGS_AT_INGRESS;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held, extack);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held, extack);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);
		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
2629 struct tcf_dump_args {
2630 struct tcf_walker w;
2631 struct sk_buff *skb;
2632 struct netlink_callback *cb;
2633 struct tcf_block *block;
2639 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2641 struct tcf_dump_args *a = (void *)arg;
2642 struct net *net = sock_net(a->skb->sk);
2644 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2645 n, NETLINK_CB(a->cb->skb).portid,
2646 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2647 RTM_NEWTFILTER, a->terse_dump, true, NULL);
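/* Resume state across netlink dump invocations, as used by tcf_chain_dump()
 * below (a descriptive note, inferred from the code): cb->args[0] holds the
 * index of the last tcf_proto visited across all chains (managed by
 * tc_dump_tfilter()), cb->args[1] holds 1 + the number of filters already
 * dumped within the current proto, and cb->args[2] is an opaque cookie the
 * classifier's walker may use to resume iteration efficiently.
 */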
2650 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2651 struct sk_buff *skb, struct netlink_callback *cb,
2652 long index_start, long *p_index, bool terse)
2654 struct net *net = sock_net(skb->sk);
2655 struct tcf_block *block = chain->block;
2656 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2657 struct tcf_proto *tp, *tp_prev;
2658 struct tcf_dump_args arg;
2660 for (tp = __tcf_get_next_proto(chain, NULL);
2663 tp = __tcf_get_next_proto(chain, tp),
2664 tcf_proto_put(tp_prev, true, NULL),
2666 if (*p_index < index_start)
2668 if (TC_H_MAJ(tcm->tcm_info) &&
2669 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2671 if (TC_H_MIN(tcm->tcm_info) &&
2672 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2674 if (*p_index > index_start)
2675 memset(&cb->args[1], 0,
2676 sizeof(cb->args) - sizeof(cb->args[0]));
2677 if (cb->args[1] == 0) {
2678 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2679 NETLINK_CB(cb->skb).portid,
2680 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2681 RTM_NEWTFILTER, false, true, NULL) <= 0)
2687 arg.w.fn = tcf_node_dump;
2692 arg.parent = parent;
2694 arg.w.skip = cb->args[1] - 1;
2696 arg.w.cookie = cb->args[2];
2697 arg.terse_dump = terse;
2698 tp->ops->walk(tp, &arg.w, true);
2699 cb->args[2] = arg.w.cookie;
2700 cb->args[1] = arg.w.count + 1;
2707 tcf_proto_put(tp, true, NULL);
2711 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2712 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
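/* Illustrative sketch (not part of this file): a dump requester opts in to
 * terse dumps by adding a bitfield32 TCA_DUMP_FLAGS attribute with
 * TCA_DUMP_FLAGS_TERSE set in both value and selector, which is what the
 * policy above validates:
 *
 *	struct nla_bitfield32 flags = {
 *		.value		= TCA_DUMP_FLAGS_TERSE,
 *		.selector	= TCA_DUMP_FLAGS_TERSE,
 *	};
 *
 *	nla_put(skb, TCA_DUMP_FLAGS, sizeof(flags), &flags);
 */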
2715 /* called with RTNL */
2716 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2718 struct tcf_chain *chain, *chain_prev;
2719 struct net *net = sock_net(skb->sk);
2720 struct nlattr *tca[TCA_MAX + 1];
2721 struct Qdisc *q = NULL;
2722 struct tcf_block *block;
2723 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2724 bool terse_dump = false;
2730 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2733 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2734 tcf_tfilter_dump_policy, cb->extack);
2738 if (tca[TCA_DUMP_FLAGS]) {
2739 struct nla_bitfield32 flags =
2740 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2742 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2745 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2746 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2749 /* If we work with a block index, q is NULL and the parent value
2750 * will never be used in the following code. The check
2751 * in tcf_fill_node prevents it. However, the compiler does not
2752 * see that far, so set parent to zero to silence the warning
2753 * about parent being uninitialized.
2754 */
2757 const struct Qdisc_class_ops *cops;
2758 struct net_device *dev;
2759 unsigned long cl = 0;
2761 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2765 parent = tcm->tcm_parent;
2767 q = rtnl_dereference(dev->qdisc);
2769 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2772 cops = q->ops->cl_ops;
2775 if (!cops->tcf_block)
2777 if (TC_H_MIN(tcm->tcm_parent)) {
2778 cl = cops->find(q, tcm->tcm_parent);
2782 block = cops->tcf_block(q, cl, NULL);
2785 parent = block->classid;
2786 if (tcf_block_shared(block))
2790 index_start = cb->args[0];
2793 for (chain = __tcf_get_next_chain(block, NULL);
2796 chain = __tcf_get_next_chain(block, chain),
2797 tcf_chain_put(chain_prev)) {
2798 if (tca[TCA_CHAIN] &&
2799 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2801 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2802 index_start, &index, terse_dump)) {
2803 tcf_chain_put(chain);
2809 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2810 tcf_block_refcnt_put(block, true);
2811 cb->args[0] = index;
2814 /* If we made no progress, the error (EMSGSIZE) is real */
2815 if (skb->len == 0 && err)
2820 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2821 void *tmplt_priv, u32 chain_index,
2822 struct net *net, struct sk_buff *skb,
2823 struct tcf_block *block,
2824 u32 portid, u32 seq, u16 flags, int event,
2825 struct netlink_ext_ack *extack)
2827 unsigned char *b = skb_tail_pointer(skb);
2828 const struct tcf_proto_ops *ops;
2829 struct nlmsghdr *nlh;
2836 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2838 goto out_nlmsg_trim;
2839 tcm = nlmsg_data(nlh);
2840 tcm->tcm_family = AF_UNSPEC;
2843 tcm->tcm_handle = 0;
2845 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2846 tcm->tcm_parent = block->q->handle;
2848 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2849 tcm->tcm_block_index = block->index;
2852 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2853 goto nla_put_failure;
2856 if (nla_put_string(skb, TCA_KIND, ops->kind))
2857 goto nla_put_failure;
2858 if (ops->tmplt_dump(skb, net, priv) < 0)
2859 goto nla_put_failure;
2862 if (extack && extack->_msg &&
2863 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2864 goto out_nlmsg_trim;
2866 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2876 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2877 u32 seq, u16 flags, int event, bool unicast,
2878 struct netlink_ext_ack *extack)
2880 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2881 struct tcf_block *block = chain->block;
2882 struct net *net = block->net;
2883 struct sk_buff *skb;
2886 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2890 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2891 chain->index, net, skb, block, portid,
2892 seq, flags, event, extack) <= 0) {
2898 err = rtnl_unicast(skb, net, portid);
2900 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2901 flags & NLM_F_ECHO);
2906 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2907 void *tmplt_priv, u32 chain_index,
2908 struct tcf_block *block, struct sk_buff *oskb,
2909 u32 seq, u16 flags, bool unicast)
2911 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2912 struct net *net = block->net;
2913 struct sk_buff *skb;
2915 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2919 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2920 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2926 return rtnl_unicast(skb, net, portid);
2928 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2931 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2932 struct nlattr **tca,
2933 struct netlink_ext_ack *extack)
2935 const struct tcf_proto_ops *ops;
2936 char name[IFNAMSIZ];
2939 /* If kind is not set, the user did not specify a template. */
2943 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2944 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2948 ops = tcf_proto_lookup_ops(name, true, extack);
2950 return PTR_ERR(ops);
2951 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2952 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2956 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2957 if (IS_ERR(tmplt_priv)) {
2958 module_put(ops->owner);
2959 return PTR_ERR(tmplt_priv);
2961 chain->tmplt_ops = ops;
2962 chain->tmplt_priv = tmplt_priv;
2966 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2969 /* If template ops are not set, there is no work to do for us. */
2973 tmplt_ops->tmplt_destroy(tmplt_priv);
2974 module_put(tmplt_ops->owner);
2977 /* Add/delete/get a chain */
2979 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2980 struct netlink_ext_ack *extack)
2982 struct net *net = sock_net(skb->sk);
2983 struct nlattr *tca[TCA_MAX + 1];
2988 struct tcf_chain *chain;
2989 struct tcf_block *block;
2995 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2996 rtm_tca_policy, extack);
3001 parent = t->tcm_parent;
3004 block = tcf_block_find(net, &q, &parent, &cl,
3005 t->tcm_ifindex, t->tcm_block_index, extack);
3007 return PTR_ERR(block);
3009 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3010 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3011 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3016 mutex_lock(&block->lock);
3017 chain = tcf_chain_lookup(block, chain_index);
3018 if (n->nlmsg_type == RTM_NEWCHAIN) {
3020 if (tcf_chain_held_by_acts_only(chain)) {
3021 /* The chain exists only because there is
3022 * some action referencing it.
3023 */
3024 tcf_chain_hold(chain);
3026 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3028 goto errout_block_locked;
3031 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3032 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3034 goto errout_block_locked;
3036 chain = tcf_chain_create(block, chain_index);
3038 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3040 goto errout_block_locked;
3044 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3045 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3047 goto errout_block_locked;
3049 tcf_chain_hold(chain);
3052 if (n->nlmsg_type == RTM_NEWCHAIN) {
3053 /* Modifying chain requires holding parent block lock. In case
3054 * the chain was successfully added, take a reference to the
3055 * chain. This ensures that an empty chain does not disappear at
3056 * the end of this function.
3057 */
3058 tcf_chain_hold(chain);
3059 chain->explicitly_created = true;
3061 mutex_unlock(&block->lock);
3063 switch (n->nlmsg_type) {
3065 err = tc_chain_tmplt_add(chain, net, tca, extack);
3067 tcf_chain_put_explicitly_created(chain);
3071 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3072 RTM_NEWCHAIN, false, extack);
3075 tfilter_notify_chain(net, skb, block, q, parent, n,
3076 chain, RTM_DELTFILTER, extack);
3077 /* Flush the chain first as the user requested chain removal. */
3078 tcf_chain_flush(chain, true);
3079 /* In case the chain was successfully deleted, put a reference
3080 * to the chain previously taken during addition.
3081 */
3082 tcf_chain_put_explicitly_created(chain);
3085 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3086 n->nlmsg_flags, n->nlmsg_type, true, extack);
3088 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3092 NL_SET_ERR_MSG(extack, "Unsupported message type");
3097 tcf_chain_put(chain);
3099 tcf_block_release(q, block, true);
3101 /* Replay the request. */
3105 errout_block_locked:
3106 mutex_unlock(&block->lock);
3110 /* called with RTNL */
3111 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3113 struct net *net = sock_net(skb->sk);
3114 struct nlattr *tca[TCA_MAX + 1];
3115 struct Qdisc *q = NULL;
3116 struct tcf_block *block;
3117 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3118 struct tcf_chain *chain;
3123 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3126 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3127 rtm_tca_policy, cb->extack);
3131 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3132 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3136 const struct Qdisc_class_ops *cops;
3137 struct net_device *dev;
3138 unsigned long cl = 0;
3140 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3144 if (!tcm->tcm_parent)
3145 q = rtnl_dereference(dev->qdisc);
3147 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3151 cops = q->ops->cl_ops;
3154 if (!cops->tcf_block)
3156 if (TC_H_MIN(tcm->tcm_parent)) {
3157 cl = cops->find(q, tcm->tcm_parent);
3161 block = cops->tcf_block(q, cl, NULL);
3164 if (tcf_block_shared(block))
3168 index_start = cb->args[0];
3171 mutex_lock(&block->lock);
3172 list_for_each_entry(chain, &block->chain_list, list) {
3173 if ((tca[TCA_CHAIN] &&
3174 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3176 if (index < index_start) {
3180 if (tcf_chain_held_by_acts_only(chain))
3182 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3183 chain->index, net, skb, block,
3184 NETLINK_CB(cb->skb).portid,
3185 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3186 RTM_NEWCHAIN, NULL);
3191 mutex_unlock(&block->lock);
3193 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3194 tcf_block_refcnt_put(block, true);
3195 cb->args[0] = index;
3198 /* If we made no progress, the error (EMSGSIZE) is real */
3199 if (skb->len == 0 && err)
3204 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3205 int police, struct tcf_proto *tp, u32 handle,
3206 bool use_action_miss)
3210 #ifdef CONFIG_NET_CLS_ACT
3212 exts->nr_actions = 0;
3213 exts->miss_cookie_node = NULL;
3214 /* Note: we do not yet own a reference on net.
3215 * This reference might be taken later from tcf_exts_get_net().
3216 */
3218 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3224 exts->action = action;
3225 exts->police = police;
3227 if (!use_action_miss)
3230 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3232 goto err_miss_alloc;
3237 tcf_exts_destroy(exts);
3238 #ifdef CONFIG_NET_CLS_ACT
3239 exts->actions = NULL;
3243 EXPORT_SYMBOL(tcf_exts_init_ex);
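/* Illustrative sketch of the intended call sequence in a classifier's
 * ->change() callback; TCA_EXAMPLE_ACT and TCA_EXAMPLE_POLICE stand in for
 * the classifier's real netlink attributes and are hypothetical, and est is
 * the rate-estimator TLV:
 *
 *	struct tcf_exts exts;
 *	int err;
 *
 *	err = tcf_exts_init_ex(&exts, net, TCA_EXAMPLE_ACT,
 *			       TCA_EXAMPLE_POLICE, tp, handle, true);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate_ex(net, tp, tb, est, &exts, flags,
 *				   fl_flags, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&exts);
 *		return err;
 *	}
 */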
3245 void tcf_exts_destroy(struct tcf_exts *exts)
3247 tcf_exts_miss_cookie_base_destroy(exts);
3249 #ifdef CONFIG_NET_CLS_ACT
3250 if (exts->actions) {
3251 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3252 kfree(exts->actions);
3254 exts->nr_actions = 0;
3257 EXPORT_SYMBOL(tcf_exts_destroy);
3259 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3260 struct nlattr *rate_tlv, struct tcf_exts *exts,
3261 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3263 #ifdef CONFIG_NET_CLS_ACT
3265 int init_res[TCA_ACT_MAX_PRIO] = {};
3266 struct tc_action *act;
3267 size_t attr_size = 0;
3269 if (exts->police && tb[exts->police]) {
3270 struct tc_action_ops *a_o;
3272 a_o = tc_action_load_ops(tb[exts->police], true,
3273 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3276 return PTR_ERR(a_o);
3277 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3278 act = tcf_action_init_1(net, tp, tb[exts->police],
3279 rate_tlv, a_o, init_res, flags,
3281 module_put(a_o->owner);
3283 return PTR_ERR(act);
3285 act->type = exts->type = TCA_OLD_COMPAT;
3286 exts->actions[0] = act;
3287 exts->nr_actions = 1;
3288 tcf_idr_insert_many(exts->actions);
3289 } else if (exts->action && tb[exts->action]) {
3292 flags |= TCA_ACT_FLAGS_BIND;
3293 err = tcf_action_init(net, tp, tb[exts->action],
3294 rate_tlv, exts->actions, init_res,
3295 &attr_size, flags, fl_flags,
3299 exts->nr_actions = err;
3303 if ((exts->action && tb[exts->action]) ||
3304 (exts->police && tb[exts->police])) {
3305 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3312 EXPORT_SYMBOL(tcf_exts_validate_ex);
3314 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3315 struct nlattr *rate_tlv, struct tcf_exts *exts,
3316 u32 flags, struct netlink_ext_ack *extack)
3318 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3321 EXPORT_SYMBOL(tcf_exts_validate);
3323 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3325 #ifdef CONFIG_NET_CLS_ACT
3326 struct tcf_exts old = *dst;
3329 tcf_exts_destroy(&old);
3332 EXPORT_SYMBOL(tcf_exts_change);
3334 #ifdef CONFIG_NET_CLS_ACT
3335 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3337 if (exts->nr_actions == 0)
3340 return exts->actions[0];
3344 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3346 #ifdef CONFIG_NET_CLS_ACT
3347 struct nlattr *nest;
3349 if (exts->action && tcf_exts_has_actions(exts)) {
3350 /*
3351 * again for backward-compatible mode - we want
3352 * to work with both old and new modes of entering
3353 * tc data even if iproute2 was newer - jhs
3354 */
3355 if (exts->type != TCA_OLD_COMPAT) {
3356 nest = nla_nest_start_noflag(skb, exts->action);
3358 goto nla_put_failure;
3360 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3362 goto nla_put_failure;
3363 nla_nest_end(skb, nest);
3364 } else if (exts->police) {
3365 struct tc_action *act = tcf_exts_first_act(exts);
3366 nest = nla_nest_start_noflag(skb, exts->police);
3367 if (nest == NULL || !act)
3368 goto nla_put_failure;
3369 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3370 goto nla_put_failure;
3371 nla_nest_end(skb, nest);
3377 nla_nest_cancel(skb, nest);
3383 EXPORT_SYMBOL(tcf_exts_dump);
3385 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3387 #ifdef CONFIG_NET_CLS_ACT
3388 struct nlattr *nest;
3390 if (!exts->action || !tcf_exts_has_actions(exts))
3393 nest = nla_nest_start_noflag(skb, exts->action);
3395 goto nla_put_failure;
3397 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3398 goto nla_put_failure;
3399 nla_nest_end(skb, nest);
3403 nla_nest_cancel(skb, nest);
3409 EXPORT_SYMBOL(tcf_exts_terse_dump);
3411 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3413 #ifdef CONFIG_NET_CLS_ACT
3414 struct tc_action *a = tcf_exts_first_act(exts);
3415 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3420 EXPORT_SYMBOL(tcf_exts_dump_stats);
3422 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3424 if (*flags & TCA_CLS_FLAGS_IN_HW)
3426 *flags |= TCA_CLS_FLAGS_IN_HW;
3427 atomic_inc(&block->offloadcnt);
3430 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3432 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3434 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3435 atomic_dec(&block->offloadcnt);
3438 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3439 struct tcf_proto *tp, u32 *cnt,
3440 u32 *flags, u32 diff, bool add)
3442 lockdep_assert_held(&block->cb_lock);
3444 spin_lock(&tp->lock);
3447 tcf_block_offload_inc(block, flags);
3452 tcf_block_offload_dec(block, flags);
3454 spin_unlock(&tp->lock);
3458 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3459 u32 *cnt, u32 *flags)
3461 lockdep_assert_held(&block->cb_lock);
3463 spin_lock(&tp->lock);
3464 tcf_block_offload_dec(block, flags);
3466 spin_unlock(&tp->lock);
3470 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3471 void *type_data, bool err_stop)
3473 struct flow_block_cb *block_cb;
3477 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3478 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3489 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3490 void *type_data, bool err_stop, bool rtnl_held)
3492 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3498 down_read(&block->cb_lock);
3499 /* Need to obtain rtnl lock if block is bound to devs that require it.
3500 * In block bind code cb_lock is obtained while holding rtnl, so we must
3501 * obtain the locks in the same order here.
3502 */
3503 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3504 up_read(&block->cb_lock);
3509 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3511 up_read(&block->cb_lock);
3516 EXPORT_SYMBOL(tc_setup_cb_call);
3518 /* Non-destructive filter add. If a filter that wasn't already in hardware is
3519 * successfully offloaded, increment the block offloads counter. On failure, a
3520 * previously offloaded filter is considered to be intact and the offloads
3521 * counter is not decremented.
3522 */
3524 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3525 enum tc_setup_type type, void *type_data, bool err_stop,
3526 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3528 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3534 down_read(&block->cb_lock);
3535 /* Need to obtain rtnl lock if block is bound to devs that require it.
3536 * In block bind code cb_lock is obtained while holding rtnl, so we must
3537 * obtain the locks in the same order here.
3538 */
3539 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3540 up_read(&block->cb_lock);
3545 /* Make sure all netdevs sharing this block are offload-capable. */
3546 if (block->nooffloaddevcnt && err_stop) {
3547 ok_count = -EOPNOTSUPP;
3551 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3555 if (tp->ops->hw_add)
3556 tp->ops->hw_add(tp, type_data);
3558 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3561 up_read(&block->cb_lock);
3564 return min(ok_count, 0);
3566 EXPORT_SYMBOL(tc_setup_cb_add);
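/* Illustrative sketch, modelled on cls_flower's hardware-replace path (f is
 * the classifier's private filter struct): the classifier fills a
 * type-specific offload descriptor, then lets tc_setup_cb_add() update its
 * IN_HW flags and in_hw_count:
 *
 *	struct flow_cls_offload cls_flower = {};
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	int err;
 *
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 */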
3568 /* Destructive filter replace. If a filter that wasn't already in hardware is
3569 * successfully offloaded, increment the block offload counter. On failure, a
3570 * previously offloaded filter is considered to be destroyed and the offload
3571 * counter is decremented.
3572 */
3574 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3575 enum tc_setup_type type, void *type_data, bool err_stop,
3576 u32 *old_flags, unsigned int *old_in_hw_count,
3577 u32 *new_flags, unsigned int *new_in_hw_count,
3580 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3586 down_read(&block->cb_lock);
3587 /* Need to obtain rtnl lock if block is bound to devs that require it.
3588 * In block bind code cb_lock is obtained while holding rtnl, so we must
3589 * obtain the locks in the same order here.
3590 */
3591 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3592 up_read(&block->cb_lock);
3597 /* Make sure all netdevs sharing this block are offload-capable. */
3598 if (block->nooffloaddevcnt && err_stop) {
3599 ok_count = -EOPNOTSUPP;
3603 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3604 if (tp->ops->hw_del)
3605 tp->ops->hw_del(tp, type_data);
3607 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3611 if (tp->ops->hw_add)
3612 tp->ops->hw_add(tp, type_data);
3614 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3615 new_flags, ok_count, true);
3617 up_read(&block->cb_lock);
3620 return min(ok_count, 0);
3622 EXPORT_SYMBOL(tc_setup_cb_replace);
3624 /* Destroy the filter and decrement the block offload counter, if the filter
3625 * was previously offloaded.
3626 */
3628 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3629 enum tc_setup_type type, void *type_data, bool err_stop,
3630 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3632 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3638 down_read(&block->cb_lock);
3639 /* Need to obtain rtnl lock if block is bound to devs that require it.
3640 * In block bind code cb_lock is obtained while holding rtnl, so we must
3641 * obtain the locks in the same order here.
3642 */
3643 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3644 up_read(&block->cb_lock);
3649 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3651 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3652 if (tp->ops->hw_del)
3653 tp->ops->hw_del(tp, type_data);
3655 up_read(&block->cb_lock);
3658 return min(ok_count, 0);
3660 EXPORT_SYMBOL(tc_setup_cb_destroy);
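/* Illustrative counterpart to the add sketch above (same hypothetical
 * cls_flower-style context): on filter removal the classifier issues
 * FLOW_CLS_DESTROY and the offload counters are reset:
 *
 *	cls_flower.command = FLOW_CLS_DESTROY;
 *	cls_flower.cookie = (unsigned long)f;
 *	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			    false, &f->flags, &f->in_hw_count, rtnl_held);
 */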
3662 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3663 bool add, flow_setup_cb_t *cb,
3664 enum tc_setup_type type, void *type_data,
3665 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3667 int err = cb(type, type_data, cb_priv);
3670 if (add && tc_skip_sw(*flags))
3673 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3679 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3681 static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3682 const struct tc_action *act)
3684 struct tc_cookie *user_cookie;
3688 user_cookie = rcu_dereference(act->user_cookie);
3690 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3693 if (!entry->user_cookie)
3700 static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3702 flow_action_cookie_destroy(entry->user_cookie);
3705 void tc_cleanup_offload_action(struct flow_action *flow_action)
3707 struct flow_action_entry *entry;
3710 flow_action_for_each(i, entry, flow_action) {
3711 tcf_act_put_user_cookie(entry);
3712 if (entry->destructor)
3713 entry->destructor(entry->destructor_priv);
3716 EXPORT_SYMBOL(tc_cleanup_offload_action);
3718 static int tc_setup_offload_act(struct tc_action *act,
3719 struct flow_action_entry *entry,
3721 struct netlink_ext_ack *extack)
3723 #ifdef CONFIG_NET_CLS_ACT
3724 if (act->ops->offload_act_setup) {
3725 return act->ops->offload_act_setup(act, entry, index_inc, true,
3728 NL_SET_ERR_MSG(extack, "Action does not support offload");
3736 int tc_setup_action(struct flow_action *flow_action,
3737 struct tc_action *actions[],
3738 u32 miss_cookie_base,
3739 struct netlink_ext_ack *extack)
3741 int i, j, k, index, err = 0;
3742 struct tc_action *act;
3744 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3745 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3746 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3752 tcf_act_for_each_action(i, act, actions) {
3753 struct flow_action_entry *entry;
3755 entry = &flow_action->entries[j];
3756 spin_lock_bh(&act->tcfa_lock);
3757 err = tcf_act_get_user_cookie(entry, act);
3759 goto err_out_locked;
3762 err = tc_setup_offload_act(act, entry, &index, extack);
3764 goto err_out_locked;
3766 for (k = 0; k < index; k++) {
3767 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3768 entry[k].hw_index = act->tcfa_index;
3769 entry[k].cookie = (unsigned long)act;
3770 entry[k].miss_cookie =
3771 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3776 spin_unlock_bh(&act->tcfa_lock);
3781 tc_cleanup_offload_action(flow_action);
3785 spin_unlock_bh(&act->tcfa_lock);
3789 int tc_setup_offload_action(struct flow_action *flow_action,
3790 const struct tcf_exts *exts,
3791 struct netlink_ext_ack *extack)
3793 #ifdef CONFIG_NET_CLS_ACT
3794 u32 miss_cookie_base;
3799 miss_cookie_base = exts->miss_cookie_node ?
3800 exts->miss_cookie_node->miss_cookie_base : 0;
3801 return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3807 EXPORT_SYMBOL(tc_setup_offload_action);
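/* Illustrative sketch of the consumer side (not part of this file): a driver
 * walks the translated entries with flow_action_for_each() and dispatches on
 * entry->id; example_hw_add_drop_rule() and priv are hypothetical driver
 * helpers:
 *
 *	const struct flow_action_entry *entry;
 *	int i, err;
 *
 *	flow_action_for_each(i, entry, flow_action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			err = example_hw_add_drop_rule(priv, entry);
 *			break;
 *		default:
 *			err = -EOPNOTSUPP;
 *			break;
 *		}
 *		if (err)
 *			return err;
 *	}
 */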
3809 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3811 unsigned int num_acts = 0;
3812 struct tc_action *act;
3815 tcf_exts_for_each_action(i, act, exts) {
3816 if (is_tcf_pedit(act))
3817 num_acts += tcf_pedit_nkeys(act);
3823 EXPORT_SYMBOL(tcf_exts_num_actions);
3825 #ifdef CONFIG_NET_CLS_ACT
3826 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3828 struct netlink_ext_ack *extack)
3830 *p_block_index = nla_get_u32(block_index_attr);
3831 if (!*p_block_index) {
3832 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3839 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3840 enum flow_block_binder_type binder_type,
3841 struct nlattr *block_index_attr,
3842 struct netlink_ext_ack *extack)
3847 if (!block_index_attr)
3850 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3854 qe->info.binder_type = binder_type;
3855 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3856 qe->info.chain_head_change_priv = &qe->filter_chain;
3857 qe->info.block_index = block_index;
3859 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3861 EXPORT_SYMBOL(tcf_qevent_init);
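/* Illustrative sketch, modelled on sch_red: a qdisc binds its qevent block
 * during init by passing the block-index attribute through here:
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	if (err)
 *		goto err_early_drop_init;
 */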
3863 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3865 if (qe->info.block_index)
3866 tcf_block_put_ext(qe->block, sch, &qe->info);
3868 EXPORT_SYMBOL(tcf_qevent_destroy);
3870 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3871 struct netlink_ext_ack *extack)
3876 if (!block_index_attr)
3879 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3883 /* Reject a newly configured block or a change of block. */
3884 if (block_index != qe->info.block_index) {
3885 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3891 EXPORT_SYMBOL(tcf_qevent_validate_change);
3893 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3894 struct sk_buff **to_free, int *ret)
3896 struct tcf_result cl_res;
3897 struct tcf_proto *fl;
3899 if (!qe->info.block_index)
3902 fl = rcu_dereference_bh(qe->filter_chain);
3904 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3906 qdisc_qstats_drop(sch);
3907 __qdisc_drop(skb, to_free);
3908 *ret = __NET_XMIT_BYPASS;
3913 __qdisc_drop(skb, to_free);
3914 *ret = __NET_XMIT_STOLEN;
3916 case TC_ACT_REDIRECT:
3917 skb_do_redirect(skb);
3918 *ret = __NET_XMIT_STOLEN;
3924 EXPORT_SYMBOL(tcf_qevent_handle);
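/* Illustrative sketch of the enqueue-path usage (again in the style of
 * sch_red): a NULL return means the qevent consumed or freed the skb, and
 * *ret carries the verdict to propagate:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */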
3926 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3928 if (!qe->info.block_index)
3930 return nla_put_u32(skb, attr_name, qe->info.block_index);
3932 EXPORT_SYMBOL(tcf_qevent_dump);
3935 static __net_init int tcf_net_init(struct net *net)
3937 struct tcf_net *tn = net_generic(net, tcf_net_id);
3939 spin_lock_init(&tn->idr_lock);
3944 static void __net_exit tcf_net_exit(struct net *net)
3946 struct tcf_net *tn = net_generic(net, tcf_net_id);
3948 idr_destroy(&tn->idr);
3951 static struct pernet_operations tcf_net_ops = {
3952 .init = tcf_net_init,
3953 .exit = tcf_net_exit,
3955 .size = sizeof(struct tcf_net),
3958 static int __init tc_filter_init(void)
3962 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3966 err = register_pernet_subsys(&tcf_net_ops);
3968 goto err_register_pernet_subsys;
3970 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
3972 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3973 RTNL_FLAG_DOIT_UNLOCKED);
3974 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3975 RTNL_FLAG_DOIT_UNLOCKED);
3976 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3977 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3978 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3979 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3980 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3985 err_register_pernet_subsys:
3986 destroy_workqueue(tc_filter_wq);
3990 subsys_initcall(tc_filter_init);