From b5140a36da7876bc084a2c680c8dbc7438db2051 Mon Sep 17 00:00:00 2001
From: wenxu <wenxu@ucloud.cn>
Date: Mon, 24 Feb 2020 13:22:53 +0800
Subject: [PATCH] netfilter: flowtable: add indr block setup support

Add indirect block setup support to the netfilter flowtable offload
infrastructure. This allows flowtables to be hardware-offloaded on
vlan and tunnel devices, which do not provide ndo_setup_tc.

Signed-off-by: wenxu <wenxu@ucloud.cn>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 net/netfilter/nf_flow_table_offload.c | 94 +++++++++++++++++++++++++++++++++--
 1 file changed, 90 insertions(+), 4 deletions(-)

diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index c4cb03555315..f60f01e929b8 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -7,6 +7,7 @@
 #include <linux/tc_act/tc_csum.h>
 #include <net/flow_offload.h>
 #include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
@@ -827,6 +828,22 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
 	INIT_LIST_HEAD(&bo->cb_list);
 }
 
+static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
+					  struct nf_flowtable *flowtable,
+					  struct net_device *dev,
+					  enum flow_block_command cmd,
+					  struct netlink_ext_ack *extack)
+{
+	nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
+					 extack);
+	flow_indr_block_call(dev, bo, cmd);
+
+	if (list_empty(&bo->cb_list))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
 static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
 				     struct nf_flowtable *flowtable,
 				     struct net_device *dev,
@@ -835,9 +852,6 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
 {
 	int err;
 
-	if (!dev->netdev_ops->ndo_setup_tc)
-		return -EOPNOTSUPP;
-
 	nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
 					 extack);
 	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
@@ -858,7 +872,12 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
 	if (!nf_flowtable_hw_offload(flowtable))
 		return 0;
 
-	err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
+	if (dev->netdev_ops->ndo_setup_tc)
+		err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
+						&extack);
+	else
+		err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
+						     &extack);
 	if (err < 0)
 		return err;
 
@@ -866,10 +885,75 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
 
+static void nf_flow_table_indr_block_ing_cmd(struct net_device *dev,
+					     struct nf_flowtable *flowtable,
+					     flow_indr_block_bind_cb_t *cb,
+					     void *cb_priv,
+					     enum flow_block_command cmd)
+{
+	struct netlink_ext_ack extack = {};
+	struct flow_block_offload bo;
+
+	if (!flowtable)
+		return;
+
+	nf_flow_table_block_offload_init(&bo, dev_net(dev), cmd, flowtable,
+					 &extack);
+
+	cb(dev, cb_priv, TC_SETUP_FT, &bo);
+
+	nf_flow_table_block_setup(flowtable, &bo, cmd);
+}
+
+static void nf_flow_table_indr_block_cb_cmd(struct nf_flowtable *flowtable,
+					    struct net_device *dev,
+					    flow_indr_block_bind_cb_t *cb,
+					    void *cb_priv,
+					    enum flow_block_command cmd)
+{
+	if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
+		return;
+
+	nf_flow_table_indr_block_ing_cmd(dev, flowtable, cb, cb_priv, cmd);
+}
+
+static void nf_flow_table_indr_block_cb(struct net_device *dev,
+					flow_indr_block_bind_cb_t *cb,
+					void *cb_priv,
+					enum flow_block_command cmd)
+{
+	struct net *net = dev_net(dev);
+	struct nft_flowtable *nft_ft;
+	struct nft_table *table;
+	struct nft_hook *hook;
+
+	mutex_lock(&net->nft.commit_mutex);
+	list_for_each_entry(table, &net->nft.tables, list) {
+		list_for_each_entry(nft_ft, &table->flowtables, list) {
+			list_for_each_entry(hook, &nft_ft->hook_list, list) {
+				if (hook->ops.dev != dev)
+					continue;
+
+				nf_flow_table_indr_block_cb_cmd(&nft_ft->data,
+								dev, cb,
+								cb_priv, cmd);
+			}
+		}
+	}
+	mutex_unlock(&net->nft.commit_mutex);
+}
+
+static struct flow_indr_block_entry block_ing_entry = {
+	.cb	= nf_flow_table_indr_block_cb,
+	.list	= LIST_HEAD_INIT(block_ing_entry.list),
+};
+
 int nf_flow_table_offload_init(void)
 {
 	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);
 
+	flow_indr_add_block_cb(&block_ing_entry);
+
 	return 0;
 }
 
@@ -878,6 +962,8 @@ void nf_flow_table_offload_exit(void)
 	struct flow_offload_work *offload, *next;
 	LIST_HEAD(offload_pending_list);
 
+	flow_indr_del_block_cb(&block_ing_entry);
+
 	cancel_work_sync(&nf_flow_offload_work);
 
 	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
-- 
2.11.0
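
For context, the sketch below shows how the driver side of this path could
look for a device that lacks ndo_setup_tc, i.e. how a backend would consume
the TC_SETUP_FT block that flow_indr_block_call() delivers through the
callback registered above. It is not part of the patch: the foo_* names are
hypothetical, error handling is trimmed, and it only assumes the existing
flow_block_cb_* helpers and the flow_indr_block_bind_cb_t signature used in
the patch. Per-flow add/delete/stats requests later reach the registered
block callback as TC_SETUP_CLSFLOWER with a struct flow_cls_offload.

/* Hypothetical driver-side sketch -- not part of this patch. */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

/* Block callback: receives the per-flow requests for the bound flowtable. */
static int foo_ft_flow_cb(enum tc_setup_type type, void *type_data,
			  void *cb_priv)
{
	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	/*
	 * A real driver would cast type_data to struct flow_cls_offload,
	 * switch on ->command (FLOW_CLS_REPLACE/DESTROY/STATS) and
	 * translate ->rule into hardware state.
	 */
	return -EOPNOTSUPP;
}

static void foo_ft_block_release(void *cb_priv)
{
}

/* flow_indr_block_bind_cb_t: invoked via flow_indr_block_call() above. */
static int foo_indr_setup_ft_cb(struct net_device *dev, void *cb_priv,
				enum tc_setup_type type, void *type_data)
{
	struct flow_block_offload *bo = type_data;
	struct flow_block_cb *block_cb;

	if (type != TC_SETUP_FT)
		return -EOPNOTSUPP;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_alloc(foo_ft_flow_cb, cb_priv,
					       cb_priv, foo_ft_block_release);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);
		flow_block_cb_add(block_cb, bo);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(bo->block, foo_ft_flow_cb,
						cb_priv);
		if (!block_cb)
			return -ENOENT;
		flow_block_cb_remove(block_cb, bo);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

Such a driver would register the callback with
__flow_indr_block_cb_register(dev, priv, foo_indr_setup_ft_cb, priv) once it
starts tracking the vlan or tunnel device, and unregister it on teardown.
With that in place, nf_flow_table_indr_offload_cmd() finds a non-empty
bo->cb_list and the flowtable can be offloaded even though the device itself
has no ndo_setup_tc.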