OSDN Git Service

net: bonding: Use per-cpu rr_tx_counter
Author: Jussi Maki <joamaki@gmail.com>
Date: Tue, 15 Jun 2021 08:54:15 +0000 (08:54 +0000)
Committer: David S. Miller <davem@davemloft.net>
Date: Tue, 15 Jun 2021 18:26:15 +0000 (11:26 -0700)
The round-robin rr_tx_counter was shared across CPUs leading to
significant cache thrashing at high packet rates. This patch switches
the round-robin packet counter to use a per-cpu variable to decide
the destination slave.

On a test with 2x100Gbit ICE nic with pktgen_sample_04_many_flows.sh
(-s 64 -t 32) the tx rate was 19.6Mpps before and 22.3Mpps after
this patch.

"perf top -e cache_misses" before:
    12.31%  [bonding]       [k] bond_xmit_roundrobin_slave_get
    10.59%  [sch_fq_codel]  [k] fq_codel_dequeue
     9.34%  [kernel]        [k] skb_release_data
after:
    15.42%  [sch_fq_codel]  [k] fq_codel_dequeue
    10.06%  [kernel]        [k] __memset
     9.12%  [kernel]        [k] skb_release_data

Signed-off-by: Jussi Maki <joamaki@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/bonding/bond_main.c
include/net/bonding.h

index eb79a9f..1d9137e 100644 drivers/net/bonding/bond_main.c
@@ -4202,16 +4202,16 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
                slave_id = prandom_u32();
                break;
        case 1:
-               slave_id = bond->rr_tx_counter;
+               slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
                break;
        default:
                reciprocal_packets_per_slave =
                        bond->params.reciprocal_packets_per_slave;
-               slave_id = reciprocal_divide(bond->rr_tx_counter,
+               slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
+               slave_id = reciprocal_divide(slave_id,
                                             reciprocal_packets_per_slave);
                break;
        }
-       bond->rr_tx_counter++;
 
        return slave_id;
 }
@@ -4852,6 +4852,9 @@ static void bond_destructor(struct net_device *bond_dev)
 
        if (bond->wq)
                destroy_workqueue(bond->wq);
+
+       if (bond->rr_tx_counter)
+               free_percpu(bond->rr_tx_counter);
 }
 
 void bond_setup(struct net_device *bond_dev)
@@ -5350,6 +5353,15 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
+       if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
+               bond->rr_tx_counter = alloc_percpu(u32);
+               if (!bond->rr_tx_counter) {
+                       destroy_workqueue(bond->wq);
+                       bond->wq = NULL;
+                       return -ENOMEM;
+               }
+       }
+
        spin_lock_init(&bond->stats_lock);
        netdev_lockdep_set_classes(bond_dev);
 
index 019e998..1533573 100644 include/net/bonding.h
@@ -232,7 +232,7 @@ struct bonding {
        char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
        struct   list_head bond_list;
-       u32      rr_tx_counter;
+       u32 __percpu *rr_tx_counter;
        struct   ad_bond_info ad_info;
        struct   alb_bond_info alb_info;
        struct   bond_params params;