Merge tag 'v3.10.52' into android-3.10
[android-x86/kernel.git] kernel/sched/core.c
index e8b3350..98c3114 100644
@@ -1235,7 +1235,7 @@ out:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                       printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
@@ -1487,7 +1487,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        unsigned long flags;
        int cpu, success = 0;
 
-       smp_wmb();
+       /*
+        * If we are going to wake up a thread waiting for CONDITION we
+        * need to ensure that CONDITION=1 done by the caller can not be
+        * reordered with p->state check below. This pairs with mb() in
+        * set_current_state() the waiting thread does.
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
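
The comment added above pairs the waker-side smp_mb__before_spinlock() with the full barrier implied by set_current_state() on the waiter side. A minimal sketch of that pairing, assuming illustrative names (my_condition, my_waiter, my_wait(), my_wake() are not kernel symbols):

#include <linux/sched.h>

static int my_condition;			/* the CONDITION from the comment */
static struct task_struct *my_waiter;		/* task sleeping on my_condition  */

static void my_wait(void)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* implies a full barrier:
							   the state store is ordered
							   before the read below      */
		if (my_condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void my_wake(void)
{
	my_condition = 1;		/* this store must not be reordered with  */
	wake_up_process(my_waiter);	/* the p->state check in try_to_wake_up();
					   smp_mb__before_spinlock() ensures that */
}
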
@@ -2966,6 +2972,12 @@ need_resched:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
+       /*
+        * Make sure that signal_pending_state()->signal_pending() below
+        * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+        * done by the caller to avoid the race with signal_wake_up().
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
        switch_count = &prev->nivcsw;
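
The schedule() change above addresses the same kind of race, this time against signal_wake_up(): the caller's __set_current_state(TASK_INTERRUPTIBLE) must be visible before schedule() evaluates signal_pending_state(), otherwise a signal made pending in that window is missed and the task sleeps with it pending. An illustrative caller pattern, assuming a hypothetical my_event_ready() predicate:

#include <linux/errno.h>
#include <linux/sched.h>

static bool my_event_ready(void);	/* hypothetical condition check */

static int wait_for_my_event(void)
{
	int ret = 0;

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);	/* ->state store */
		if (my_event_ready())
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();	/* the new smp_mb__before_spinlock() keeps the
				   signal_pending_state() check inside schedule()
				   ordered after the ->state store above          */
	}
	__set_current_state(TASK_RUNNING);
	return ret;
}
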
@@ -5258,7 +5270,6 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_STARTING:
        case CPU_DOWN_FAILED:
                set_cpu_active((long)hcpu, true);
                return NOTIFY_OK;
@@ -7093,13 +7104,24 @@ static inline int preempt_count_equals(int preempt_offset)
        return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+       __might_sleep_init_called = 1;
+       return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
        if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-           system_state != SYSTEM_RUNNING || oops_in_progress)
+           oops_in_progress)
+               return;
+       if (system_state != SYSTEM_RUNNING &&
+           (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                return;
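
Restated outside the diff: the reworked bail-out keeps might-sleep checking off in very early boot, but once __might_sleep_init() has run as an early_initcall the warnings also fire while system_state is still SYSTEM_BOOTING, not just SYSTEM_RUNNING. A paraphrase of the condition, using a hypothetical helper name:

/* Paraphrase of the bail-out logic in __might_sleep() above. */
static bool my_might_sleep_should_warn(int preempt_offset)
{
	if (preempt_count_equals(preempt_offset) && !irqs_disabled())
		return false;		/* legitimately sleepable context */
	if (oops_in_progress)
		return false;		/* never warn while oopsing */
	if (system_state == SYSTEM_RUNNING)
		return true;
	/* still booting: warn only once the early_initcall has run */
	return __might_sleep_init_called && system_state == SYSTEM_BOOTING;
}
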
@@ -7705,6 +7727,23 @@ static void cpu_cgroup_css_offline(struct cgroup *cgrp)
        sched_offline_group(tg);
 }
 
+static int
+cpu_cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+       const struct cred *cred = current_cred(), *tcred;
+       struct task_struct *task;
+
+       cgroup_taskset_for_each(task, cgrp, tset) {
+               tcred = __task_cred(task);
+
+               if ((current != task) && !capable(CAP_SYS_NICE) &&
+                   cred->euid != tcred->uid && cred->euid != tcred->suid)
+                       return -EACCES;
+       }
+
+       return 0;
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -7800,7 +7839,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        runtime_enabled = quota != RUNTIME_INF;
        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-       account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+       /*
+        * If we need to toggle cfs_bandwidth_used, off->on must occur
+        * before making related changes, and on->off must occur afterwards
+        */
+       if (runtime_enabled && !runtime_was_enabled)
+               cfs_bandwidth_usage_inc();
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;
@@ -7826,6 +7870,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                        unthrottle_cfs_rq(cfs_rq);
                raw_spin_unlock_irq(&rq->lock);
        }
+       if (runtime_was_enabled && !runtime_enabled)
+               cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
 
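
The cfs_bandwidth_usage_inc()/cfs_bandwidth_usage_dec() placement follows a general rule for toggling a flag that lockless readers consult: enable it before publishing the state it guards, disable it only after that state is torn down. A generic sketch of the same ordering (the my_feature_* helpers are illustrative, not kernel API):

static void my_feature_usage_inc(void);		/* cf. cfs_bandwidth_usage_inc() */
static void my_feature_usage_dec(void);		/* cf. cfs_bandwidth_usage_dec() */
static void my_update_guarded_state(void);	/* the changes made under the lock */

static void my_set_feature(bool enable, bool was_enabled)
{
	/* off -> on: raise the flag before the guarded state changes, so a
	 * reader that already sees the new state also sees the flag set.   */
	if (enable && !was_enabled)
		my_feature_usage_inc();

	my_update_guarded_state();

	/* on -> off: drop the flag only after the guarded state is gone, so
	 * no reader can find the flag clear while the state still matters. */
	if (was_enabled && !enable)
		my_feature_usage_dec();
}
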
@@ -8064,6 +8110,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
+       .allow_attach   = cpu_cgroup_allow_attach,
        .exit           = cpu_cgroup_exit,
        .subsys_id      = cpu_cgroup_subsys_id,
        .base_cftypes   = cpu_files,