OSDN Git Service

core_ctl: Harden the adjustment_possible() check for unisolation
author: Pavankumar Kondeti <pkondeti@codeaurora.org>
Thu, 13 Apr 2017 11:28:57 +0000 (16:58 +0530)
committer: Pavankumar Kondeti <pkondeti@codeaurora.org>
Sat, 15 Apr 2017 12:18:48 +0000 (17:48 +0530)
When the need for CPUs is more than the number of active CPUs and there
are some isolated CPUs, we wake up the core_ctl thread to unisolate some
CPUs. The core_ctl task can't unisolate any CPU if all of them are
isolated by other clients. Track the number of CPUs isolated by core_ctl
and wake up the core_ctl task only when adjustment is really possible.

Change-Id: I11ef10860532df25cbde572aabd4b925320db8fe
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
kernel/sched/core_ctl.c

index e094cba..0b5f2de 100644 (file)
@@ -35,6 +35,7 @@ struct cluster_data {
        unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
        unsigned int active_cpus;
        unsigned int num_cpus;
+       unsigned int nr_isolated_cpus;
        cpumask_t cpu_mask;
        unsigned int need_cpus;
        unsigned int task_thres;
@@ -294,6 +295,9 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
                count += snprintf(buf + count, PAGE_SIZE - count,
                                "\tNeed CPUs: %u\n", cluster->need_cpus);
                count += snprintf(buf + count, PAGE_SIZE - count,
+                               "\tNr isolated CPUs: %u\n",
+                                               cluster->nr_isolated_cpus);
+               count += snprintf(buf + count, PAGE_SIZE - count,
                                "\tBoost: %u\n", (unsigned int) cluster->boost);
        }
        spin_unlock_irq(&state_lock);
@@ -527,7 +531,7 @@ static bool adjustment_possible(const struct cluster_data *cluster,
                                                        unsigned int need)
 {
        return (need < cluster->active_cpus || (need > cluster->active_cpus &&
-           sched_isolate_count(&cluster->cpu_mask, false)));
+                                               cluster->nr_isolated_cpus));
 }
 
 static bool eval_need(struct cluster_data *cluster)
@@ -718,6 +722,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
        struct cpu_data *c, *tmp;
        unsigned long flags;
        unsigned int num_cpus = cluster->num_cpus;
+       unsigned int nr_isolated = 0;
 
        /*
         * Protect against entry being removed (and added at tail) by other
@@ -742,12 +747,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
                if (!sched_isolate_cpu(c->cpu)) {
                        c->isolated_by_us = true;
                        move_cpu_lru(c);
+                       nr_isolated++;
                } else {
                        pr_debug("Unable to isolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
+       cluster->nr_isolated_cpus += nr_isolated;
        spin_unlock_irqrestore(&state_lock, flags);
 
        /*
@@ -757,6 +764,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
        if (cluster->active_cpus <= cluster->max_cpus)
                return;
 
+       nr_isolated = 0;
        num_cpus = cluster->num_cpus;
        spin_lock_irqsave(&state_lock, flags);
        list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
@@ -774,12 +782,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
                if (!sched_isolate_cpu(c->cpu)) {
                        c->isolated_by_us = true;
                        move_cpu_lru(c);
+                       nr_isolated++;
                } else {
                        pr_debug("Unable to isolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
+       cluster->nr_isolated_cpus += nr_isolated;
        spin_unlock_irqrestore(&state_lock, flags);
 
 }
@@ -790,6 +800,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
        struct cpu_data *c, *tmp;
        unsigned long flags;
        unsigned int num_cpus = cluster->num_cpus;
+       unsigned int nr_unisolated = 0;
 
        /*
         * Protect against entry being removed (and added at tail) by other
@@ -814,12 +825,14 @@ static void __try_to_unisolate(struct cluster_data *cluster,
                if (!sched_unisolate_cpu(c->cpu)) {
                        c->isolated_by_us = false;
                        move_cpu_lru(c);
+                       nr_unisolated++;
                } else {
                        pr_debug("Unable to unisolate CPU%u\n", c->cpu);
                }
                cluster->active_cpus = get_active_cpu_count(cluster);
                spin_lock_irqsave(&state_lock, flags);
        }
+       cluster->nr_isolated_cpus -= nr_unisolated;
        spin_unlock_irqrestore(&state_lock, flags);
 }
 
@@ -885,6 +898,8 @@ static int __ref cpu_callback(struct notifier_block *nfb,
        struct cpu_data *state = &per_cpu(cpu_state, cpu);
        struct cluster_data *cluster = state->cluster;
        unsigned int need;
+       bool do_wakeup, unisolated = false;
+       unsigned long flags;
 
        if (unlikely(!cluster || !cluster->inited))
                return NOTIFY_DONE;
@@ -910,6 +925,7 @@ static int __ref cpu_callback(struct notifier_block *nfb,
                if (state->isolated_by_us) {
                        sched_unisolate_cpu_unlocked(cpu);
                        state->isolated_by_us = false;
+                       unisolated = true;
                }
 
                /* Move a CPU to the end of the LRU when it goes offline. */
@@ -923,7 +939,12 @@ static int __ref cpu_callback(struct notifier_block *nfb,
        }
 
        need = apply_limits(cluster, cluster->need_cpus);
-       if (adjustment_possible(cluster, need))
+       spin_lock_irqsave(&state_lock, flags);
+       if (unisolated)
+               cluster->nr_isolated_cpus--;
+       do_wakeup = adjustment_possible(cluster, need);
+       spin_unlock_irqrestore(&state_lock, flags);
+       if (do_wakeup)
                wake_up_core_ctl_thread(cluster);
 
        return NOTIFY_OK;