sched/topology: Add static_key for asymmetric CPU capacity optimizations
author Morten Rasmussen <morten.rasmussen@arm.com>
Wed, 4 Jul 2018 10:17:39 +0000 (11:17 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 10 Sep 2018 09:05:48 +0000 (11:05 +0200)
The existing asymmetric CPU capacity code should cause minimal overhead
for others. Putting it behind a static_key, as has been done for the
SMT optimizations, would make it easier to extend and improve without
causing harm to others moving forward.
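
For illustration, a minimal sketch of the jump-label pattern this patch
applies. The key name and the jump_label API calls
(DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely(),
static_branch_enable_cpuslocked()) are the ones used in the patch below;
the two wrapper functions are hypothetical stand-ins for the real call
sites in wake_cap() and build_sched_domains().

#include <linux/jump_label.h>

/* The key starts disabled: the guarded branch is patched to a NOP on
 * symmetric systems, so the check is effectively free there. */
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);

/* Hypothetical stand-in for the wake_cap() fast path in the patch. */
static int asym_fast_path_check(void)
{
	/*
	 * static_branch_unlikely() compiles to a straight-line
	 * fall-through until the key is enabled at runtime.
	 */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return 0;

	/* ... asymmetric CPU capacity handling would go here ... */
	return 1;
}

/* Hypothetical stand-in for the build_sched_domains() hunk in the patch. */
static void sched_enable_asym(bool has_asym)
{
	/*
	 * Domain rebuild runs with the hotplug lock held, hence the
	 * _cpuslocked variant of static_branch_enable().
	 */
	if (has_asym)
		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
}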

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: gaku.inami.xh@renesas.com
Cc: valentin.schneider@arm.com
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1530699470-29808-2-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f808ddf..3e5071a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6188,6 +6188,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 {
        long min_cap, max_cap;
 
+       if (!static_branch_unlikely(&sched_asym_cpucapacity))
+               return 0;
+
        min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
        max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8ca..0f36adc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1185,6 +1185,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+extern struct static_key_false sched_asym_cpucapacity;
 
 struct sched_group_capacity {
        atomic_t                ref;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 5c4d583..b0cdf5e 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -398,6 +398,7 @@ DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -1705,6 +1706,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
        struct rq *rq = NULL;
        int i, ret = -ENOMEM;
        struct sched_domain_topology_level *tl_asym;
+       bool has_asym = false;
 
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
        if (alloc_state != sa_rootdomain)
@@ -1720,8 +1722,10 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
                for_each_sd_topology(tl) {
                        int dflags = 0;
 
-                       if (tl == tl_asym)
+                       if (tl == tl_asym) {
                                dflags |= SD_ASYM_CPUCAPACITY;
+                               has_asym = true;
+                       }
 
                        sd = build_sched_domain(tl, cpu_map, attr, sd, dflags, i);
 
@@ -1773,6 +1777,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        }
        rcu_read_unlock();
 
+       if (has_asym)
+               static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+
        if (rq && sched_debug_enabled) {
                pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
                        cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);