OSDN Git Service

sched/isolation: Offload residual 1Hz scheduler tick
[uclinux-h8/linux.git] / kernel / sched / idle_task.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "sched.h"
3
4 /*
5  * idle-task scheduling class.
6  *
7  * (NOTE: these are not related to SCHED_IDLE tasks which are
8  *  handled in sched/fair.c)
9  */
10
#ifdef CONFIG_SMP
/*
 * The per-CPU idle task is pinned by construction: always run it on the
 * CPU it already belongs to.
 */
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
18
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/* Anything that becomes runnable preempts the idle task. */
	resched_curr(rq);
}
26
/*
 * Lowest-priority pick: nothing else is runnable, so switch to this
 * CPU's idle task.  @prev must be put back before we hand out rq->idle.
 */
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	put_prev_task(rq, prev);
	/* Let SMT idle tracking know this CPU is going idle. */
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
	return rq->idle;
}
35
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Drop the rq lock around printk()/dump_stack(): the console path
	 * may itself take locks and must not run under rq->lock.  The
	 * caller holds it with IRQs disabled, so reacquire before return.
	 */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
48
/*
 * The CPU is leaving idle: record it via rq_last_tick_reset().
 * NOTE(review): presumably used by NO_HZ_FULL to track when this rq
 * last saw a tick - confirm against rq_last_tick_reset()'s definition.
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
	rq_last_tick_reset(rq);
}
53
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
	/* Nothing to account for the idle task. */
}
65
/* The idle task carries no per-class state that needs refreshing. */
static void set_curr_task_idle(struct rq *rq)
{
}
69
/* No task may ever be switched into the idle class. */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
74
/* The idle task has no priority to change. */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
80
/* The idle task has no round-robin timeslice; report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
85
/* No runtime statistics are maintained for the idle task. */
static void update_curr_idle(struct rq *rq)
{
}
89
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 * lowest priority, one pinned task per CPU, never enqueued or yielded.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL: this is the lowest-priority class */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};