
Merge tag 'v3.10.52' into android-3.10
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58453b8..98c3114 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
        int cpu = smp_processor_id();
-       return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+       if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+               return false;
+
+       if (idle_cpu(cpu) && !need_resched())
+               return true;
+
+       /*
+        * We can't run the idle load balance on this CPU at this time,
+        * so cancel the kick and clear NOHZ_BALANCE_KICK.
+        */
+       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+       return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
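
The rework above moves the need_resched() test and, crucially, the flag
clearing into got_nohz_idle_kick() itself: a kick that cannot be served
must still clear NOHZ_BALANCE_KICK, since kickers skip CPUs whose flag is
already set. A minimal userspace model of that hazard; the names here
(balance_kick, cpu_idle, resched) are stand-ins of mine, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool balance_kick;        /* stands in for NOHZ_BALANCE_KICK */
static bool cpu_idle = true;            /* stands in for idle_cpu()        */
static bool resched = true;             /* stands in for need_resched()    */

static bool got_idle_kick(void)
{
        if (!atomic_load(&balance_kick))
                return false;

        if (cpu_idle && !resched)
                return true;

        /* can't balance now: drop the kick, otherwise later kicks
         * (senders only kick when the flag is clear) are lost forever */
        atomic_store(&balance_kick, false);
        return false;
}

int main(void)
{
        atomic_store(&balance_kick, true);                  /* we got kicked */
        printf("serve kick now: %d\n", got_idle_kick());            /* 0 */
        printf("flag left set: %d\n", atomic_load(&balance_kick));  /* 0 */
        return 0;
}
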
@@ -1223,7 +1235,7 @@ out:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                       printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-           && !tick_nohz_full_cpu(smp_processor_id()))
+       if (llist_empty(&this_rq()->wake_list)
+                       && !tick_nohz_full_cpu(smp_processor_id())
+                       && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
        /*
         * Check if someone kicked us for doing the nohz idle load balance.
         */
-       if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+       if (unlikely(got_nohz_idle_kick())) {
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
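
With got_nohz_idle_kick() now doing its own need_resched() test and
clearing a kick it cannot serve, the two scheduler_ipi() hunks above are
follow-ups: the duplicate !need_resched() check goes away, and the now
side-effecting got_nohz_idle_kick() is moved to the last position in the
guard, so it is only evaluated when its answer actually decides the early
return.
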
@@ -1474,7 +1487,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        unsigned long flags;
        int cpu, success = 0;
 
-       smp_wmb();
+       /*
+        * If we are going to wake up a thread waiting for CONDITION we
+        * need to ensure that CONDITION=1 done by the caller can not be
+        * reordered with p->state check below. This pairs with mb() in
+        * set_current_state() the waiting thread does.
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
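
The comment added above names the classic store-vs-load pairing between a
waker and a waiter. Below is a compilable userspace sketch of that pairing,
using C11 fences where the kernel uses set_current_state() and
smp_mb__before_spinlock(); every name in it is a stand-in, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int condition;    /* the CONDITION in the comment       */
static atomic_int task_state;   /* 0 = running, 1 = about to sleep    */

static void *waiter(void *arg)
{
        (void)arg;
        /* models set_current_state(TASK_INTERRUPTIBLE): the state
         * store comes with a full barrier */
        atomic_store(&task_state, 1);
        atomic_thread_fence(memory_order_seq_cst);

        if (!atomic_load(&condition)) {
                /* a real waiter would call schedule() here */
        }
        atomic_store(&task_state, 0);
        return NULL;
}

static void *waker(void *arg)
{
        (void)arg;
        atomic_store(&condition, 1);
        /* models smp_mb__before_spinlock(): orders the CONDITION store
         * before the task_state read, pairing with the waiter's fence */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load(&task_state)) {
                /* a real waker would proceed to wake the task */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, waiter, NULL);
        pthread_create(&b, NULL, waker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* with both fences, at least one side must observe the other,
         * so the lost-wakeup interleaving is impossible */
        puts("done");
        return 0;
}
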
@@ -2953,6 +2972,12 @@ need_resched:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
+       /*
+        * Make sure that signal_pending_state()->signal_pending() below
+        * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+        * done by the caller to avoid the race with signal_wake_up().
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
        switch_count = &prev->nivcsw;
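
This is the same pairing seen from the schedule() side: without a full
barrier before taking rq->lock, the caller's
__set_current_state(TASK_INTERRUPTIBLE) store could be reordered past the
signal_pending_state() check, and a concurrent signal_wake_up() could slip
between the two, leaving the task blocked with a signal already pending.
The fence placement mirrors the userspace sketch after the try_to_wake_up()
hunk above.
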
@@ -4745,7 +4770,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
-       vtime_init_idle(idle);
+       vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
@@ -5245,7 +5270,6 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_STARTING:
        case CPU_DOWN_FAILED:
                set_cpu_active((long)hcpu, true);
                return NOTIFY_OK;
@@ -7080,13 +7104,24 @@ static inline int preempt_count_equals(int preempt_offset)
        return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+       __might_sleep_init_called = 1;
+       return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-           system_state != SYSTEM_RUNNING || oops_in_progress)
+           oops_in_progress)
+               return;
+       if (system_state != SYSTEM_RUNNING &&
+           (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
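
The net effect of this hunk is that might_sleep() warnings, previously
suppressed for the whole of boot, start firing during SYSTEM_BOOTING as
soon as the early initcall has run. A standalone model of the new gate;
the enum and function names are illustrative stand-ins for the kernel's
system_state handling:

#include <stdbool.h>
#include <stdio.h>

enum sys_state { BOOTING, RUNNING, HALT };

static bool warn_suppressed(enum sys_state s, bool init_called)
{
        if (s == RUNNING)
                return false;           /* always warn once running */
        /* non-RUNNING: warn only in BOOTING after the early initcall */
        return !init_called || s != BOOTING;
}

int main(void)
{
        printf("BOOTING before initcall: %d\n", warn_suppressed(BOOTING, false)); /* 1 */
        printf("BOOTING after  initcall: %d\n", warn_suppressed(BOOTING, true));  /* 0 */
        printf("HALT    after  initcall: %d\n", warn_suppressed(HALT, true));     /* 1 */
        return 0;
}
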
@@ -7692,6 +7727,23 @@ static void cpu_cgroup_css_offline(struct cgroup *cgrp)
        sched_offline_group(tg);
 }
 
+static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       const struct cred *cred = current_cred(), *tcred;
+       struct task_struct *task;
+
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               tcred = __task_cred(task);
+
+               if ((current != task) && !capable(CAP_SYS_NICE) &&
+                   cred->euid != tcred->uid && cred->euid != tcred->suid)
+                       return -EACCES;
+       }
+
+       return 0;
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -7787,7 +7839,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        runtime_enabled = quota != RUNTIME_INF;
        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-       account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+       /*
+        * If we need to toggle cfs_bandwidth_used, off->on must occur
+        * before making related changes, and on->off must occur afterwards
+        */
+       if (runtime_enabled && !runtime_was_enabled)
+               cfs_bandwidth_usage_inc();
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;
@@ -7813,6 +7870,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                        unthrottle_cfs_rq(cfs_rq);
                raw_spin_unlock_irq(&rq->lock);
        }
+       if (runtime_was_enabled && !runtime_enabled)
+               cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
 
@@ -8051,6 +8110,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
+       .allow_attach   = cpu_cgroup_allow_attach,
        .exit           = cpu_cgroup_exit,
        .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,