From eb61baf69871b9836783a81bc451189edb0d9de2 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 1 Feb 2017 17:09:06 +0100
Subject: [PATCH] sched/headers: Move the wake-queue types and interfaces from
 sched.h into <linux/sched/wake_q.h>

Acked-by: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h        | 54 ++++----------------------------------------
 include/linux/sched/wake_q.h | 47 ++++++++++++++++++++++++++++++++++++++
 ipc/msg.c                    |  1 -
 3 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4f512e337584..5cda7cf09505 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -953,56 +953,6 @@ void force_schedstat_enabled(void);
 # define SCHED_FIXEDPOINT_SHIFT	10
 # define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
 
-/*
- * Wake-queues are lists of tasks with a pending wakeup, whose
- * callers have already marked the task as woken internally,
- * and can thus carry on. A common use case is being able to
- * do the wakeups once the corresponding user lock as been
- * released.
- *
- * We hold reference to each task in the list across the wakeup,
- * thus guaranteeing that the memory is still valid by the time
- * the actual wakeups are performed in wake_up_q().
- *
- * One per task suffices, because there's never a need for a task to be
- * in two wake queues simultaneously; it is forbidden to abandon a task
- * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
- * already in a wake queue, the wakeup will happen soon and the second
- * waker can just skip it.
- *
- * The DEFINE_WAKE_Q macro declares and initializes the list head.
- * wake_up_q() does NOT reinitialize the list; it's expected to be
- * called near the end of a function. Otherwise, the list can be
- * re-initialized for later re-use by wake_q_init().
- *
- * Note that this can cause spurious wakeups. schedule() callers
- * must ensure the call is done inside a loop, confirming that the
- * wakeup condition has in fact occurred.
- */
-struct wake_q_node {
-	struct wake_q_node *next;
-};
-
-struct wake_q_head {
-	struct wake_q_node *first;
-	struct wake_q_node **lastp;
-};
-
-#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-
-#define DEFINE_WAKE_Q(name)				\
-	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
-
-static inline void wake_q_init(struct wake_q_head *head)
-{
-	head->first = WAKE_Q_TAIL;
-	head->lastp = &head->first;
-}
-
-extern void wake_q_add(struct wake_q_head *head,
-		       struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
-
 struct io_context;			/* See blkdev.h */
 
 
@@ -1234,6 +1184,10 @@ enum perf_event_task_context {
 	perf_nr_task_contexts,
 };
 
+struct wake_q_node {
+	struct wake_q_node *next;
+};
+
 /* Track pages that require TLB flushes */
 struct tlbflush_unmap_batch {
 	/*
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 6383b35e4eba..d03d8a9047dc 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -1,6 +1,53 @@
 #ifndef _LINUX_SCHED_WAKE_Q_H
 #define _LINUX_SCHED_WAKE_Q_H
 
+/*
+ * Wake-queues are lists of tasks with a pending wakeup, whose
+ * callers have already marked the task as woken internally,
+ * and can thus carry on. A common use case is being able to
+ * do the wakeups once the corresponding user lock as been
+ * released.
+ *
+ * We hold reference to each task in the list across the wakeup,
+ * thus guaranteeing that the memory is still valid by the time
+ * the actual wakeups are performed in wake_up_q().
+ *
+ * One per task suffices, because there's never a need for a task to be
+ * in two wake queues simultaneously; it is forbidden to abandon a task
+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
+ * already in a wake queue, the wakeup will happen soon and the second
+ * waker can just skip it.
+ *
+ * The DEFINE_WAKE_Q macro declares and initializes the list head.
+ * wake_up_q() does NOT reinitialize the list; it's expected to be
+ * called near the end of a function. Otherwise, the list can be
+ * re-initialized for later re-use by wake_q_init().
+ *
+ * Note that this can cause spurious wakeups. schedule() callers
+ * must ensure the call is done inside a loop, confirming that the
+ * wakeup condition has in fact occurred.
+ */
+
 #include <linux/sched.h>
 
+struct wake_q_head {
+	struct wake_q_node *first;
+	struct wake_q_node **lastp;
+};
+
+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
+
+#define DEFINE_WAKE_Q(name)				\
+	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+static inline void wake_q_init(struct wake_q_head *head)
+{
+	head->first = WAKE_Q_TAIL;
+	head->lastp = &head->first;
+}
+
+extern void wake_q_add(struct wake_q_head *head,
+		       struct task_struct *task);
+extern void wake_up_q(struct wake_q_head *head);
+
 #endif /* _LINUX_SCHED_WAKE_Q_H */
diff --git a/ipc/msg.c b/ipc/msg.c
index ecc387e573f6..104926dc72be 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-- 
2.11.0
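
For readers new to this interface, here is a minimal usage sketch of the wake_q
primitives the patch moves into <linux/sched/wake_q.h>. The names my_waiter,
my_waiters, my_lock and my_release_and_wake() are hypothetical, for illustration
only; the pattern is the one the header comment describes: queue wakeups under
the lock with wake_q_add(), drop the lock, then perform them with wake_up_q().

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical waiter bookkeeping, for illustration only. */
struct my_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_waiters);

static void my_release_and_wake(void)
{
	DEFINE_WAKE_Q(wake_q);		/* declares and initializes the list head */
	struct my_waiter *w, *tmp;

	spin_lock(&my_lock);
	list_for_each_entry_safe(w, tmp, &my_waiters, list) {
		list_del_init(&w->list);
		/*
		 * Queue the wakeup; a reference to the task is held
		 * until the wakeup is actually performed.
		 */
		wake_q_add(&wake_q, w->task);
	}
	spin_unlock(&my_lock);

	/* Do the actual wakeups outside the lock. */
	wake_up_q(&wake_q);
}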