From a233f1120c37724938f7201fe2353b2577adaaf9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 23 Sep 2013 19:04:26 +0200
Subject: [PATCH] sched: Prepare for per-cpu preempt_count

When using per-cpu preempt_count variables we need to save/restore the
preempt_count on context switch (into per task storage; for instance
the old thread_info::preempt_count variable) because of PREEMPT_ACTIVE.

However, this means that on fork() the preempt_count value of the last
context switch gets copied and if we had a PREEMPT_ACTIVE switch right
before cloning a child task the child task will now too have
PREEMPT_ACTIVE set and start its life with an extra PREEMPT_ACTIVE
count.

Therefore we need to make init_task_preempt_count() unconditional;
this resets whatever preempt_count we inherited from our parent
process. Doing so for !per-cpu implementations is harmless.

For !PREEMPT_COUNT kernels we need to be careful not to start life
with an increased preempt_count.

Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/n/tip-4k0b7oy1rcdyzochwiixuwi9@git.kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 12 +++++++++---
 kernel/sched/core.c   |  2 --
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 06ac17c7e639..b09798b672f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -428,6 +428,14 @@ struct task_cputime {
 		.sum_exec_runtime = 0,				\
 	}
 
+#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
+#else
+#define PREEMPT_DISABLED	PREEMPT_ENABLED
+#endif
+
 /*
  * Disable preemption until the scheduler is running.
  * Reset by start_kernel()->sched_init()->init_idle().
@@ -435,9 +443,7 @@ struct task_cputime {
  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
  * before the scheduler is active -- see should_resched().
  */
-#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE + PREEMPT_NEED_RESCHED)
-#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
-#define PREEMPT_DISABLED	(1 + PREEMPT_NEED_RESCHED)
+#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9c84a9ab1892..f575d5bd7e7a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1722,9 +1722,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
 	p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT_COUNT
 	init_task_preempt_count(p);
-#endif
 #ifdef CONFIG_SMP
 	plist_node_init(&p->pushable_tasks, MAX_PRIO);
 #endif
-- 
2.11.0
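
[Editor's note] A minimal sketch of the kind of helper the changelog
describes. The helper name matches the patch; the body below is an
assumption modeled on the thread_info::preempt_count storage mentioned
in the changelog, not the patch's own definition:

/*
 * Illustrative sketch only; not part of the patch above. On fork(),
 * unconditionally reset the preempt_count the child inherited from
 * its parent (which may still carry PREEMPT_ACTIVE from the parent's
 * last context switch).
 */
#define init_task_preempt_count(p) do {				\
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED;	\
} while (0)

Because PREEMPT_DISABLED expands to plain PREEMPT_ENABLED on
!CONFIG_PREEMPT_COUNT kernels (see the sched.h hunk above), the same
unconditional reset also covers the changelog's last point: such
kernels do not start life with an increased preempt_count.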