2 #include <linux/sched.h>
3 #include <linux/sched/sysctl.h>
4 #include <linux/sched/rt.h>
5 #include <linux/sched/deadline.h>
6 #include <linux/mutex.h>
7 #include <linux/spinlock.h>
8 #include <linux/stop_machine.h>
9 #include <linux/irq_work.h>
10 #include <linux/tick.h>
11 #include <linux/slab.h>
14 #include "cpudeadline.h"
20 /* task_struct::on_rq states: */
21 #define TASK_ON_RQ_QUEUED 1
22 #define TASK_ON_RQ_MIGRATING 2
24 extern __read_mostly int scheduler_running;
26 extern unsigned long calc_load_update;
27 extern atomic_long_t calc_load_tasks;
29 extern void calc_global_load_tick(struct rq *this_rq);
31 struct freq_max_load_entry {
32 /* The maximum load, accounting for the governor's headroom. */
36 struct freq_max_load {
39 struct freq_max_load_entry freqs[0];
42 extern DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
44 extern long calc_load_fold_active(struct rq *this_rq);
47 extern void update_cpu_load_active(struct rq *this_rq);
49 static inline void update_cpu_load_active(struct rq *this_rq) { }
53 * Helpers for converting nanosecond timing to jiffy resolution
55 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
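/*
 * Illustrative example (an addition, not part of the original header):
 * with HZ == 100 a jiffy is 10 ms, i.e. NSEC_PER_SEC / HZ == 10000000,
 * so NS_TO_JIFFIES(25000000) == 2 (integer division truncates the
 * remaining half jiffy).
 */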
58 * Increase resolution of nice-level calculations for 64-bit architectures.
59 * The extra resolution improves shares distribution and load balancing of
60 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
61 * hierarchies, especially on larger systems. This is not a user-visible change
62 * and does not change the user-interface for setting shares/weights.
64 * We increase resolution only if we have enough bits to allow this increased
65 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
66 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
69 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
70 # define SCHED_LOAD_RESOLUTION 10
71 # define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
72 # define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
74 # define SCHED_LOAD_RESOLUTION 0
75 # define scale_load(w) (w)
76 # define scale_load_down(w) (w)
79 #define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
80 #define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
82 #define NICE_0_LOAD SCHED_LOAD_SCALE
83 #define NICE_0_SHIFT SCHED_LOAD_SHIFT
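/*
 * Illustrative sketch (an addition, not part of the original header): a
 * quick sanity check of the load-scaling arithmetic under the current
 * resolution-disabled settings, kept under #if 0 so it is never built.
 */
#if 0
static inline void scale_load_example(void)
{
	/* With SCHED_LOAD_RESOLUTION == 0 these are all identities. */
	BUILD_BUG_ON(scale_load(NICE_0_LOAD) != NICE_0_LOAD);
	BUILD_BUG_ON(scale_load_down(NICE_0_LOAD) != NICE_0_LOAD);
	BUILD_BUG_ON(NICE_0_LOAD != (1L << 10));	/* i.e. 1024 */
}
#endif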
86 * Single value that decides SCHED_DEADLINE internal math precision.
87 * 10 -> just above 1us
88 * 9 -> just above 0.5us
93 * These are the 'tuning knobs' of the scheduler:
97 * single value that denotes runtime == period, ie unlimited time.
99 #define RUNTIME_INF ((u64)~0ULL)
101 static inline int idle_policy(int policy)
103 return policy == SCHED_IDLE;
105 static inline int fair_policy(int policy)
107 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
110 static inline int rt_policy(int policy)
112 return policy == SCHED_FIFO || policy == SCHED_RR;
115 static inline int dl_policy(int policy)
117 return policy == SCHED_DEADLINE;
119 static inline bool valid_policy(int policy)
121 return idle_policy(policy) || fair_policy(policy) ||
122 rt_policy(policy) || dl_policy(policy);
125 static inline int task_has_rt_policy(struct task_struct *p)
127 return rt_policy(p->policy);
130 static inline int task_has_dl_policy(struct task_struct *p)
132 return dl_policy(p->policy);
136 * Tells if entity @a should preempt entity @b.
139 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
141 return dl_time_before(a->deadline, b->deadline);
145 * This is the priority-queue data structure of the RT scheduling class:
147 struct rt_prio_array {
148 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
149 struct list_head queue[MAX_RT_PRIO];
152 struct rt_bandwidth {
153 /* nests inside the rq lock: */
154 raw_spinlock_t rt_runtime_lock;
157 struct hrtimer rt_period_timer;
158 unsigned int rt_period_active;
161 void __dl_clear_params(struct task_struct *p);
164 * To keep the bandwidth of -deadline tasks and groups under control
165 * we need some place where:
166 * - store the maximum -deadline bandwidth of the system (the group);
167 * - cache the fraction of that bandwidth that is currently allocated.
169 * This is all done in the data structure below. It is similar to the
170 * one used for RT-throttling (rt_bandwidth), with the main difference
171 * that, since here we are only interested in admission control, we
172 * do not decrease any runtime while the group "executes", nor do we
173 * need a timer to replenish it.
175 * With respect to SMP, the bandwidth is given on a per-CPU basis,
177 * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
178 * - dl_total_bw array contains, in the i-th element, the currently
179 * allocated bandwidth on the i-th CPU.
180 * Moreover, groups consume bandwidth on each CPU, while tasks only
181 * consume bandwidth on the CPU they're running on.
182 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
183 * that will be shown the next time the proc or cgroup controls are
184 * read. It can, in turn, be changed by writing to its own
187 struct dl_bandwidth {
188 raw_spinlock_t dl_runtime_lock;
193 static inline int dl_bandwidth_enabled(void)
195 return sysctl_sched_rt_runtime >= 0;
198 extern struct dl_bw *dl_bw_of(int i);
206 void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
208 dl_b->total_bw -= tsk_bw;
212 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
214 dl_b->total_bw += tsk_bw;
218 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
220 return dl_b->bw != -1 &&
221 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
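/*
 * Illustrative sketch (an addition, not part of the original header): how
 * the admission test above behaves.  The numbers are made up; bw and
 * total_bw just need to share the same fixed-point scale.
 */
#if 0
static inline bool dl_admission_example(void)
{
	struct dl_bw b = { .bw = 950, .total_bw = 3000 };

	/*
	 * Four CPUs offer 4 * 950 = 3800 units; granting 900 more on top
	 * of the 3000 already allocated would need 3900, so this request
	 * overflows and must be rejected.
	 */
	return __dl_overflow(&b, 4, 0, 900);	/* true -> reject */
}
#endif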
224 extern struct mutex sched_domains_mutex;
226 #ifdef CONFIG_CGROUP_SCHED
228 #include <linux/cgroup.h>
233 extern struct list_head task_groups;
235 struct cfs_bandwidth {
236 #ifdef CONFIG_CFS_BANDWIDTH
240 s64 hierarchical_quota;
243 int idle, period_active;
244 struct hrtimer period_timer, slack_timer;
245 struct list_head throttled_cfs_rq;
248 int nr_periods, nr_throttled;
253 /* task group related information */
255 struct cgroup_subsys_state css;
257 bool notify_on_migrate;
258 #ifdef CONFIG_SCHED_HMP
259 bool upmigrate_discouraged;
262 #ifdef CONFIG_FAIR_GROUP_SCHED
263 /* schedulable entities of this group on each cpu */
264 struct sched_entity **se;
265 /* runqueue "owned" by this group on each cpu */
266 struct cfs_rq **cfs_rq;
267 unsigned long shares;
270 atomic_long_t load_avg;
274 #ifdef CONFIG_RT_GROUP_SCHED
275 struct sched_rt_entity **rt_se;
276 struct rt_rq **rt_rq;
278 struct rt_bandwidth rt_bandwidth;
282 struct list_head list;
284 struct task_group *parent;
285 struct list_head siblings;
286 struct list_head children;
288 #ifdef CONFIG_SCHED_AUTOGROUP
289 struct autogroup *autogroup;
292 struct cfs_bandwidth cfs_bandwidth;
295 #ifdef CONFIG_FAIR_GROUP_SCHED
296 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
299 * A weight of 0 or 1 can cause arithmetic problems.
300 * The weight of a cfs_rq is the sum of the weights of the entities
301 * queued on it, so the weight of an entity (and hence the shares
302 * value of a task group) should not be too large.
303 * (The default weight is 1024 - so there's no practical
304 * limitation from this.)
306 #define MIN_SHARES (1UL << 1)
307 #define MAX_SHARES (1UL << 18)
310 typedef int (*tg_visitor)(struct task_group *, void *);
312 extern int walk_tg_tree_from(struct task_group *from,
313 tg_visitor down, tg_visitor up, void *data);
316 * Iterate the full tree, calling @down when first entering a node and @up when
317 * leaving it for the final time.
319 * Caller must hold rcu_lock or sufficient equivalent.
321 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
323 return walk_tg_tree_from(&root_task_group, down, up, data);
326 extern int tg_nop(struct task_group *tg, void *data);
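/*
 * Illustrative sketch (an addition, not part of the original header): a
 * typical caller walks the whole task-group tree under RCU, doing its work
 * on the way down and passing tg_nop when nothing is needed on the way
 * back up.  The visitor names below are made up for the example.
 */
#if 0
static int example_visit_tg(struct task_group *tg, void *data)
{
	/* inspect @tg here; returning non-zero aborts the walk */
	return 0;
}

static void example_walk_all_groups(void)
{
	rcu_read_lock();
	walk_tg_tree(example_visit_tg, tg_nop, NULL);
	rcu_read_unlock();
}
#endif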
328 extern void free_fair_sched_group(struct task_group *tg);
329 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
330 extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
331 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
332 struct sched_entity *se, int cpu,
333 struct sched_entity *parent);
334 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
335 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
337 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
338 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
339 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
341 extern void free_rt_sched_group(struct task_group *tg);
342 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
343 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
344 struct sched_rt_entity *rt_se, int cpu,
345 struct sched_rt_entity *parent);
347 extern struct task_group *sched_create_group(struct task_group *parent);
348 extern void sched_online_group(struct task_group *tg,
349 struct task_group *parent);
350 extern void sched_destroy_group(struct task_group *tg);
351 extern void sched_offline_group(struct task_group *tg);
353 extern void sched_move_task(struct task_struct *tsk);
355 #ifdef CONFIG_FAIR_GROUP_SCHED
356 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
359 #else /* CONFIG_CGROUP_SCHED */
361 struct cfs_bandwidth { };
363 #endif /* CONFIG_CGROUP_SCHED */
365 #ifdef CONFIG_SCHED_HMP
367 struct hmp_sched_stats {
369 u64 cumulative_runnable_avg;
372 struct sched_cluster {
373 struct list_head list;
378 int max_possible_capacity;
380 int efficiency; /* Differentiate cpus with different IPC capability */
381 int load_scale_factor;
383 * max_freq = user or thermal defined maximum
384 * max_possible_freq = maximum supported by hardware
386 unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
388 int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
389 unsigned int static_cluster_pwr_cost;
392 extern unsigned long all_cluster_ids[];
394 static inline int cluster_first_cpu(struct sched_cluster *cluster)
396 return cpumask_first(&cluster->cpus);
399 struct related_thread_group {
402 struct list_head tasks;
403 struct list_head list;
404 struct sched_cluster *preferred_cluster;
409 extern struct list_head cluster_head;
410 extern int num_clusters;
411 extern struct sched_cluster *sched_cluster[NR_CPUS];
412 extern int group_will_fit(struct sched_cluster *cluster,
413 struct related_thread_group *grp, u64 demand);
415 #define for_each_sched_cluster(cluster) \
416 list_for_each_entry_rcu(cluster, &cluster_head, list)
420 /* CFS-related fields in a runqueue */
422 struct load_weight load;
423 unsigned int nr_running, h_nr_running;
428 u64 min_vruntime_copy;
431 struct rb_root tasks_timeline;
432 struct rb_node *rb_leftmost;
435 * 'curr' points to currently running entity on this cfs_rq.
436 * It is set to NULL otherwise (i.e. when none are currently running).
438 struct sched_entity *curr, *next, *last, *skip;
440 #ifdef CONFIG_SCHED_DEBUG
441 unsigned int nr_spread_over;
448 struct sched_avg avg;
449 u64 runnable_load_sum;
450 unsigned long runnable_load_avg;
451 #ifdef CONFIG_FAIR_GROUP_SCHED
452 unsigned long tg_load_avg_contrib;
454 atomic_long_t removed_load_avg, removed_util_avg;
456 u64 load_last_update_time_copy;
459 #ifdef CONFIG_FAIR_GROUP_SCHED
461 * h_load = weight * f(tg)
463 * Where f(tg) is the recursive weight fraction assigned to
466 unsigned long h_load;
467 u64 last_h_load_update;
468 struct sched_entity *h_load_next;
469 #endif /* CONFIG_FAIR_GROUP_SCHED */
470 #endif /* CONFIG_SMP */
472 #ifdef CONFIG_FAIR_GROUP_SCHED
473 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
476 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
477 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
478 * (like users, containers etc.)
480 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
481 * list is used during load balance.
484 struct list_head leaf_cfs_rq_list;
485 struct task_group *tg; /* group that "owns" this runqueue */
487 #ifdef CONFIG_CFS_BANDWIDTH
489 #ifdef CONFIG_SCHED_HMP
490 struct hmp_sched_stats hmp_stats;
495 s64 runtime_remaining;
497 u64 throttled_clock, throttled_clock_task;
498 u64 throttled_clock_task_time;
499 int throttled, throttle_count;
500 struct list_head throttled_list;
501 #endif /* CONFIG_CFS_BANDWIDTH */
502 #endif /* CONFIG_FAIR_GROUP_SCHED */
505 static inline int rt_bandwidth_enabled(void)
507 return sysctl_sched_rt_runtime >= 0;
510 /* RT IPI pull logic requires IRQ_WORK */
511 #ifdef CONFIG_IRQ_WORK
512 # define HAVE_RT_PUSH_IPI
515 /* Real-Time classes' related field in a runqueue: */
517 struct rt_prio_array active;
518 unsigned int rt_nr_running;
519 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
521 int curr; /* highest queued rt task prio */
523 int next; /* next highest */
528 unsigned long rt_nr_migratory;
529 unsigned long rt_nr_total;
531 struct plist_head pushable_tasks;
532 #ifdef HAVE_RT_PUSH_IPI
535 struct irq_work push_work;
536 raw_spinlock_t push_lock;
538 #endif /* CONFIG_SMP */
544 /* Nests inside the rq lock: */
545 raw_spinlock_t rt_runtime_lock;
547 #ifdef CONFIG_RT_GROUP_SCHED
548 unsigned long rt_nr_boosted;
551 struct task_group *tg;
555 /* Deadline class' related fields in a runqueue */
557 /* runqueue is an rbtree, ordered by deadline */
558 struct rb_root rb_root;
559 struct rb_node *rb_leftmost;
561 unsigned long dl_nr_running;
565 * Deadline values of the currently executing and the
566 * earliest ready task on this rq. Caching these facilitates
567 * the decision whether or not a ready but not running task
568 * should migrate somewhere else.
575 unsigned long dl_nr_migratory;
579 * Tasks on this rq that can be pushed away. They are kept in
580 * an rb-tree, ordered by tasks' deadlines, with caching
581 * of the leftmost (earliest deadline) element.
583 struct rb_root pushable_dl_tasks_root;
584 struct rb_node *pushable_dl_tasks_leftmost;
593 * We add the notion of a root-domain which will be used to define per-domain
594 * variables. Each exclusive cpuset essentially defines an island domain by
595 * fully partitioning the member cpus from any other cpuset. Whenever a new
596 * exclusive cpuset is created, we also create and attach a new root-domain
605 cpumask_var_t online;
607 /* Indicate more than one runnable task for any CPU */
611 * The bit corresponding to a CPU gets set here if such CPU has more
612 * than one runnable -deadline task (as it is below for RT tasks).
614 cpumask_var_t dlo_mask;
620 * The "RT overload" flag: it gets set if a CPU has more than
621 * one runnable RT task.
623 cpumask_var_t rto_mask;
624 struct cpupri cpupri;
627 extern struct root_domain def_root_domain;
629 #endif /* CONFIG_SMP */
632 * This is the main, per-CPU runqueue data structure.
634 * Locking rule: code that wants to lock multiple runqueues (such as
635 * the load balancing or the thread migration code) must acquire the
636 * locks in ascending &runqueue order.
643 * nr_running and cpu_load should be in the same cacheline because
644 * remote CPUs use both these fields when doing load calculation.
646 unsigned int nr_running;
647 #ifdef CONFIG_NUMA_BALANCING
648 unsigned int nr_numa_running;
649 unsigned int nr_preferred_running;
651 #define CPU_LOAD_IDX_MAX 5
652 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
653 unsigned long last_load_update_tick;
654 #ifdef CONFIG_NO_HZ_COMMON
656 unsigned long nohz_flags;
658 #ifdef CONFIG_NO_HZ_FULL
659 unsigned long last_sched_tick;
661 /* capture load from *all* tasks on this cpu: */
662 struct load_weight load;
663 unsigned long nr_load_updates;
670 #ifdef CONFIG_FAIR_GROUP_SCHED
671 /* list of leaf cfs_rq on this cpu: */
672 struct list_head leaf_cfs_rq_list;
673 #endif /* CONFIG_FAIR_GROUP_SCHED */
676 * This is part of a global counter where only the total sum
677 * over all CPUs matters. A task can increase this counter on
678 * one CPU and if it got migrated afterwards it may decrease
679 * it on another CPU. Always updated under the runqueue lock:
681 unsigned long nr_uninterruptible;
683 struct task_struct *curr, *idle, *stop;
684 unsigned long next_balance;
685 struct mm_struct *prev_mm;
687 unsigned int clock_skip_update;
694 struct root_domain *rd;
695 struct sched_domain *sd;
697 unsigned long cpu_capacity;
698 unsigned long cpu_capacity_orig;
700 struct callback_head *balance_callback;
702 unsigned char idle_balance;
703 /* For active balancing */
706 struct task_struct *push_task;
707 struct cpu_stop_work active_balance_work;
708 /* cpu of this runqueue: */
712 struct list_head cfs_tasks;
718 int cstate, wakeup_latency, wakeup_energy;
720 /* This is used to determine avg_idle's max value */
721 u64 max_idle_balance_cost;
724 #ifdef CONFIG_SCHED_HMP
725 struct sched_cluster *cluster;
726 struct cpumask freq_domain_cpumask;
727 struct hmp_sched_stats hmp_stats;
730 unsigned long hmp_flags;
735 unsigned int static_cpu_pwr_cost;
736 struct task_struct *ed_task;
738 #ifdef CONFIG_SCHED_FREQ_INPUT
739 unsigned int old_busy_time;
744 #ifdef CONFIG_SCHED_FREQ_INPUT
745 u64 curr_runnable_sum;
746 u64 prev_runnable_sum;
747 u64 nt_curr_runnable_sum;
748 u64 nt_prev_runnable_sum;
751 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
754 #ifdef CONFIG_PARAVIRT
757 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
758 u64 prev_steal_time_rq;
761 /* calc_load related fields */
762 unsigned long calc_load_update;
763 long calc_load_active;
765 #ifdef CONFIG_SCHED_HRTICK
767 int hrtick_csd_pending;
768 struct call_single_data hrtick_csd;
770 struct hrtimer hrtick_timer;
773 #ifdef CONFIG_SCHEDSTATS
775 struct sched_info rq_sched_info;
776 unsigned long long rq_cpu_time;
777 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
779 /* sys_sched_yield() stats */
780 unsigned int yld_count;
782 /* schedule() stats */
783 unsigned int sched_count;
784 unsigned int sched_goidle;
786 /* try_to_wake_up() stats */
787 unsigned int ttwu_count;
788 unsigned int ttwu_local;
792 struct llist_head wake_list;
795 #ifdef CONFIG_CPU_IDLE
796 /* Must be inspected within an RCU lock section */
797 struct cpuidle_state *idle_state;
801 static inline int cpu_of(struct rq *rq)
810 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
812 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
813 #define this_rq() this_cpu_ptr(&runqueues)
814 #define task_rq(p) cpu_rq(task_cpu(p))
815 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
816 #define raw_rq() raw_cpu_ptr(&runqueues)
818 static inline u64 __rq_clock_broken(struct rq *rq)
820 return READ_ONCE(rq->clock);
823 static inline u64 rq_clock(struct rq *rq)
825 lockdep_assert_held(&rq->lock);
829 static inline u64 rq_clock_task(struct rq *rq)
831 lockdep_assert_held(&rq->lock);
832 return rq->clock_task;
835 #define RQCF_REQ_SKIP 0x01
836 #define RQCF_ACT_SKIP 0x02
838 static inline void rq_clock_skip_update(struct rq *rq, bool skip)
840 lockdep_assert_held(&rq->lock);
842 rq->clock_skip_update |= RQCF_REQ_SKIP;
844 rq->clock_skip_update &= ~RQCF_REQ_SKIP;
848 enum numa_topology_type {
853 extern enum numa_topology_type sched_numa_topology_type;
854 extern int sched_max_numa_distance;
855 extern bool find_numa_distance(int distance);
858 #ifdef CONFIG_NUMA_BALANCING
859 /* The regions in numa_faults array from task_struct */
860 enum numa_faults_stats {
866 extern void sched_setnuma(struct task_struct *p, int node);
867 extern int migrate_task_to(struct task_struct *p, int cpu);
868 extern int migrate_swap(struct task_struct *, struct task_struct *);
869 #endif /* CONFIG_NUMA_BALANCING */
874 queue_balance_callback(struct rq *rq,
875 struct callback_head *head,
876 void (*func)(struct rq *rq))
878 lockdep_assert_held(&rq->lock);
880 if (unlikely(head->next))
883 head->func = (void (*)(struct callback_head *))func;
884 head->next = rq->balance_callback;
885 rq->balance_callback = head;
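/*
 * Illustrative sketch (an addition, not part of the original header): a
 * scheduling class typically embeds a per-cpu callback_head and queues its
 * push/pull work from under rq->lock; the callback then runs once the lock
 * is about to be released.  All names below are made up for the example.
 */
#if 0
static DEFINE_PER_CPU(struct callback_head, example_balance_head);

static void example_push_tasks(struct rq *rq)
{
	/* migration work that must not run deep under rq->lock goes here */
}

static inline void example_queue_push(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(example_balance_head, cpu_of(rq)),
			       example_push_tasks);
}
#endif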
888 extern void sched_ttwu_pending(void);
890 #define rcu_dereference_check_sched_domain(p) \
891 rcu_dereference_check((p), \
892 lockdep_is_held(&sched_domains_mutex))
895 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
896 * See detach_destroy_domains: synchronize_sched for details.
898 * The domain tree of any CPU may only be accessed from within
899 * preempt-disabled sections.
901 #define for_each_domain(cpu, __sd) \
902 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
903 __sd; __sd = __sd->parent)
905 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
908 * highest_flag_domain - Return highest sched_domain containing flag.
909 * @cpu: The cpu whose highest level of sched domain is to
911 * @flag: The flag to check for the highest sched_domain
914 * Returns the highest sched_domain of a cpu which contains the given flag.
916 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
918 struct sched_domain *sd, *hsd = NULL;
920 for_each_domain(cpu, sd) {
921 if (!(sd->flags & flag))
929 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
931 struct sched_domain *sd;
933 for_each_domain(cpu, sd) {
934 if (sd->flags & flag)
941 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
942 DECLARE_PER_CPU(int, sd_llc_size);
943 DECLARE_PER_CPU(int, sd_llc_id);
944 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
945 DECLARE_PER_CPU(struct sched_domain *, sd_busy);
946 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
948 struct sched_group_capacity {
951 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
954 unsigned int capacity;
955 unsigned long next_update;
956 int imbalance; /* XXX unrelated to capacity but shared group state */
958 * Number of busy cpus in this group.
960 atomic_t nr_busy_cpus;
962 unsigned long cpumask[0]; /* iteration mask */
966 struct sched_group *next; /* Must be a circular list */
969 unsigned int group_weight;
970 struct sched_group_capacity *sgc;
973 * The CPUs this group covers.
975 * NOTE: this field is variable length. (Allocated dynamically
976 * by attaching extra space to the end of the structure,
977 * depending on how many CPUs the kernel has booted up with)
979 unsigned long cpumask[0];
982 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
984 return to_cpumask(sg->cpumask);
988 * cpumask masking which cpus in the group are allowed to iterate up the domain
991 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
993 return to_cpumask(sg->sgc->cpumask);
997 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
998 * @group: The group whose first cpu is to be returned.
1000 static inline unsigned int group_first_cpu(struct sched_group *group)
1002 return cpumask_first(sched_group_cpus(group));
1005 extern int group_balance_cpu(struct sched_group *sg);
1009 static inline void sched_ttwu_pending(void) { }
1011 #endif /* CONFIG_SMP */
1014 #include "auto_group.h"
1016 extern void init_new_task_load(struct task_struct *p);
1018 #ifdef CONFIG_SCHED_HMP
1020 #define WINDOW_STATS_RECENT 0
1021 #define WINDOW_STATS_MAX 1
1022 #define WINDOW_STATS_MAX_RECENT_AVG 2
1023 #define WINDOW_STATS_AVG 3
1024 #define WINDOW_STATS_INVALID_POLICY 4
1026 extern struct mutex policy_mutex;
1027 extern unsigned int sched_ravg_window;
1028 extern unsigned int sched_use_pelt;
1029 extern unsigned int sched_disable_window_stats;
1030 extern unsigned int sched_enable_hmp;
1031 extern unsigned int max_possible_freq;
1032 extern unsigned int min_max_freq;
1033 extern unsigned int pct_task_load(struct task_struct *p);
1034 extern unsigned int max_possible_efficiency;
1035 extern unsigned int min_possible_efficiency;
1036 extern unsigned int max_capacity;
1037 extern unsigned int min_capacity;
1038 extern unsigned int max_load_scale_factor;
1039 extern unsigned int max_possible_capacity;
1040 extern unsigned int min_max_possible_capacity;
1041 extern unsigned int sched_upmigrate;
1042 extern unsigned int sched_downmigrate;
1043 extern unsigned int sched_init_task_load_pelt;
1044 extern unsigned int sched_init_task_load_windows;
1045 extern unsigned int sched_heavy_task;
1046 extern unsigned int up_down_migrate_scale_factor;
1047 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
1048 extern unsigned int max_task_load(void);
1049 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
1050 u64 delta, u64 wallclock);
1051 unsigned int cpu_temp(int cpu);
1052 int sched_set_group_id(struct task_struct *p, unsigned int group_id);
1053 extern unsigned int nr_eligible_big_tasks(int cpu);
1054 extern void update_up_down_migrate(void);
1056 static inline struct sched_cluster *cpu_cluster(int cpu)
1058 return cpu_rq(cpu)->cluster;
1061 static inline int cpu_capacity(int cpu)
1063 return cpu_rq(cpu)->cluster->capacity;
1066 static inline int cpu_max_possible_capacity(int cpu)
1068 return cpu_rq(cpu)->cluster->max_possible_capacity;
1071 static inline int cpu_load_scale_factor(int cpu)
1073 return cpu_rq(cpu)->cluster->load_scale_factor;
1076 static inline int cpu_efficiency(int cpu)
1078 return cpu_rq(cpu)->cluster->efficiency;
1081 static inline unsigned int cpu_cur_freq(int cpu)
1083 return cpu_rq(cpu)->cluster->cur_freq;
1086 static inline unsigned int cpu_min_freq(int cpu)
1088 return cpu_rq(cpu)->cluster->min_freq;
1091 static inline unsigned int cpu_max_freq(int cpu)
1093 return cpu_rq(cpu)->cluster->max_freq;
1096 static inline unsigned int cpu_max_possible_freq(int cpu)
1098 return cpu_rq(cpu)->cluster->max_possible_freq;
1101 static inline int same_cluster(int src_cpu, int dst_cpu)
1103 return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
1106 static inline int cpu_max_power_cost(int cpu)
1108 return cpu_rq(cpu)->cluster->max_power_cost;
1111 static inline bool hmp_capable(void)
1113 return max_possible_capacity != min_max_possible_capacity;
1117 * 'load' is expressed relative to the "best cpu" running at its best
1118 * frequency. Scale it for the given cpu, accounting for how much worse
1119 * that cpu is relative to the "best cpu".
1121 static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
1123 u64 lsf = cpu_load_scale_factor(cpu);
1133 static inline unsigned int task_load(struct task_struct *p)
1136 return p->se.avg.runnable_avg_sum_scaled;
1138 return p->ravg.demand;
1142 inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
1143 struct task_struct *p)
1147 if (!sched_enable_hmp || sched_disable_window_stats)
1150 task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
1151 (sched_disable_window_stats ? 0 : p->ravg.demand);
1153 stats->cumulative_runnable_avg += task_load;
1157 dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
1158 struct task_struct *p)
1162 if (!sched_enable_hmp || sched_disable_window_stats)
1165 task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
1166 (sched_disable_window_stats ? 0 : p->ravg.demand);
1168 stats->cumulative_runnable_avg -= task_load;
1170 BUG_ON((s64)stats->cumulative_runnable_avg < 0);
1174 fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
1175 struct task_struct *p, s64 task_load_delta)
1177 if (!sched_enable_hmp || sched_disable_window_stats)
1180 stats->cumulative_runnable_avg += task_load_delta;
1181 BUG_ON((s64)stats->cumulative_runnable_avg < 0);
1185 #define pct_to_real(tunable) \
1186 (div64_u64((u64)tunable * (u64)max_task_load(), 100))
1188 #define real_to_pct(tunable) \
1189 (div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
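/*
 * Illustrative example (an addition): with a hypothetical max_task_load()
 * of 100000 units, pct_to_real(20) == 20000 and real_to_pct(20000) == 20;
 * div64_u64() keeps the conversions safe on 32-bit kernels.
 */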
1191 #define SCHED_HIGH_IRQ_TIMEOUT 3
1192 static inline u64 sched_irqload(int cpu)
1194 struct rq *rq = cpu_rq(cpu);
1197 delta = get_jiffies_64() - rq->irqload_ts;
1199 * The current context can be preempted by an irq, and rq->irqload_ts
1200 * can be updated from irq context, so delta can be negative.
1201 * That is okay: we can safely return, since it means an irq occurred
1202 * recently.
1205 if (delta < SCHED_HIGH_IRQ_TIMEOUT)
1206 return rq->avg_irqload;
1211 static inline int sched_cpu_high_irqload(int cpu)
1213 return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
1217 struct related_thread_group *task_related_thread_group(struct task_struct *p)
1222 #else /* CONFIG_SCHED_HMP */
1224 #define sched_use_pelt 0
1226 struct hmp_sched_stats;
1227 struct related_thread_group;
1229 static inline u64 scale_load_to_cpu(u64 load, int cpu)
1234 static inline unsigned int nr_eligible_big_tasks(int cpu)
1239 static inline int pct_task_load(struct task_struct *p) { return 0; }
1241 static inline int cpu_capacity(int cpu)
1243 return SCHED_LOAD_SCALE;
1246 static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
1248 static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
1249 struct task_struct *p)
1253 static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
1254 struct task_struct *p)
1258 static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
1259 u64 delta, u64 wallclock)
1263 static inline int sched_cpu_high_irqload(int cpu) { return 0; }
1265 static inline void set_preferred_cluster(struct related_thread_group *grp) { }
1268 struct related_thread_group *task_related_thread_group(struct task_struct *p)
1273 static inline u32 task_load(struct task_struct *p) { return 0; }
1275 static inline int update_preferred_cluster(struct related_thread_group *grp,
1276 struct task_struct *p, u32 old_load)
1281 #endif /* CONFIG_SCHED_HMP */
1284 * Returns the rq capacity of any rq in a group. This does not play
1285 * well with groups where rq capacity can change independently.
1287 #define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))
1289 #ifdef CONFIG_SCHED_FREQ_INPUT
1291 extern void check_for_freq_change(struct rq *rq);
1293 /* Are the frequencies of two cpus synchronized with each other? */
1294 static inline int same_freq_domain(int src_cpu, int dst_cpu)
1296 struct rq *rq = cpu_rq(src_cpu);
1298 if (src_cpu == dst_cpu)
1301 return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
1304 #else /* CONFIG_SCHED_FREQ_INPUT */
1306 #define sched_migration_fixup 0
1308 static inline void check_for_freq_change(struct rq *rq) { }
1310 static inline int same_freq_domain(int src_cpu, int dst_cpu)
1315 #endif /* CONFIG_SCHED_FREQ_INPUT */
1317 #ifdef CONFIG_SCHED_HMP
1319 #define BOOST_KICK 0
1320 #define CPU_RESERVED 1
1322 static inline int is_reserved(int cpu)
1324 struct rq *rq = cpu_rq(cpu);
1326 return test_bit(CPU_RESERVED, &rq->hmp_flags);
1329 static inline int mark_reserved(int cpu)
1331 struct rq *rq = cpu_rq(cpu);
1333 /* Name boost_flags as hmp_flags? */
1334 return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
1337 static inline void clear_reserved(int cpu)
1339 struct rq *rq = cpu_rq(cpu);
1341 clear_bit(CPU_RESERVED, &rq->hmp_flags);
1344 static inline u64 cpu_cravg_sync(int cpu, int sync)
1346 struct rq *rq = cpu_rq(cpu);
1349 load = rq->hmp_stats.cumulative_runnable_avg;
1352 * If load is being checked in a sync wakeup environment,
1353 * we may want to discount the load of the currently running
1356 if (sync && cpu == smp_processor_id()) {
1357 if (load > rq->curr->ravg.demand)
1358 load -= rq->curr->ravg.demand;
1366 extern void check_for_migration(struct rq *rq, struct task_struct *p);
1367 extern void pre_big_task_count_change(const struct cpumask *cpus);
1368 extern void post_big_task_count_change(const struct cpumask *cpus);
1369 extern void set_hmp_defaults(void);
1370 extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
1371 extern unsigned int power_cost(int cpu, u64 demand);
1372 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
1373 extern void boost_kick(int cpu);
1374 extern int sched_boost(void);
1376 #else /* CONFIG_SCHED_HMP */
1378 #define sched_enable_hmp 0
1379 #define sched_freq_legacy_mode 1
1381 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
1382 static inline void pre_big_task_count_change(void) { }
1383 static inline void post_big_task_count_change(void) { }
1384 static inline void set_hmp_defaults(void) { }
1386 static inline void clear_reserved(int cpu) { }
1388 #define trace_sched_cpu_load(...)
1389 #define trace_sched_cpu_load_lb(...)
1390 #define trace_sched_cpu_load_cgroup(...)
1391 #define trace_sched_cpu_load_wakeup(...)
1393 #endif /* CONFIG_SCHED_HMP */
1395 #ifdef CONFIG_CGROUP_SCHED
1398 * Return the group to which this task belongs.
1400 * We cannot use task_css() and friends because the cgroup subsystem
1401 * changes that value before the cgroup_subsys::attach() method is called,
1402 * therefore we cannot pin it and might observe the wrong value.
1404 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
1405 * core changes this before calling sched_move_task().
1407 * Instead we use a 'copy' which is updated from sched_move_task() while
1408 * holding both task_struct::pi_lock and rq::lock.
1410 static inline struct task_group *task_group(struct task_struct *p)
1412 return p->sched_task_group;
1415 static inline bool task_notify_on_migrate(struct task_struct *p)
1417 return task_group(p)->notify_on_migrate;
1420 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
1421 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1423 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1424 struct task_group *tg = task_group(p);
1427 #ifdef CONFIG_FAIR_GROUP_SCHED
1428 p->se.cfs_rq = tg->cfs_rq[cpu];
1429 p->se.parent = tg->se[cpu];
1432 #ifdef CONFIG_RT_GROUP_SCHED
1433 p->rt.rt_rq = tg->rt_rq[cpu];
1434 p->rt.parent = tg->rt_se[cpu];
1438 #else /* CONFIG_CGROUP_SCHED */
1440 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1441 static inline struct task_group *task_group(struct task_struct *p)
1445 static inline bool task_notify_on_migrate(struct task_struct *p)
1449 #endif /* CONFIG_CGROUP_SCHED */
1451 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1453 set_task_rq(p, cpu);
1456 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1457 * successfully executed on another CPU. We must ensure that updates of
1458 * per-task data have been completed by this moment.
1461 task_thread_info(p)->cpu = cpu;
1467 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
1469 #ifdef CONFIG_SCHED_DEBUG
1470 # include <linux/static_key.h>
1471 # define const_debug __read_mostly
1473 # define const_debug const
1476 extern const_debug unsigned int sysctl_sched_features;
1478 #define SCHED_FEAT(name, enabled) \
1479 __SCHED_FEAT_##name ,
1482 #include "features.h"
1488 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
1489 #define SCHED_FEAT(name, enabled) \
1490 static __always_inline bool static_branch_##name(struct static_key *key) \
1492 return static_key_##enabled(key); \
1495 #include "features.h"
1499 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1500 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1501 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
1502 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1503 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
1505 extern struct static_key_false sched_numa_balancing;
1507 static inline u64 global_rt_period(void)
1509 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1512 static inline u64 global_rt_runtime(void)
1514 if (sysctl_sched_rt_runtime < 0)
1517 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
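/*
 * Illustrative note (an addition): with the usual defaults of
 * sysctl_sched_rt_period == 1000000 us and sysctl_sched_rt_runtime ==
 * 950000 us, RT tasks may consume at most 950 ms of every second; writing
 * -1 to the runtime sysctl makes global_rt_runtime() return RUNTIME_INF,
 * i.e. no RT throttling.
 */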
1520 static inline int task_current(struct rq *rq, struct task_struct *p)
1522 return rq->curr == p;
1525 static inline int task_running(struct rq *rq, struct task_struct *p)
1530 return task_current(rq, p);
1534 static inline int task_on_rq_queued(struct task_struct *p)
1536 return p->on_rq == TASK_ON_RQ_QUEUED;
1539 static inline int task_on_rq_migrating(struct task_struct *p)
1541 return p->on_rq == TASK_ON_RQ_MIGRATING;
1544 #ifndef prepare_arch_switch
1545 # define prepare_arch_switch(next) do { } while (0)
1547 #ifndef finish_arch_post_lock_switch
1548 # define finish_arch_post_lock_switch() do { } while (0)
1551 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1555 * We can optimise this out completely for !SMP, because the
1556 * SMP rebalancing from interrupt is the only thing that cares
1563 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1567 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1568 * We must ensure this doesn't happen until the switch is completely
1571 * In particular, the load of prev->state in finish_task_switch() must
1572 * happen before this.
1574 * Pairs with the control dependency and rmb in try_to_wake_up().
1576 smp_store_release(&prev->on_cpu, 0);
1578 #ifdef CONFIG_DEBUG_SPINLOCK
1579 /* this is a valid case when another task releases the spinlock */
1580 rq->lock.owner = current;
1583 * If we are tracking spinlock dependencies then we have to
1584 * fix up the runqueue lock - which gets 'carried over' from
1585 * prev into current:
1587 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1589 raw_spin_unlock_irq(&rq->lock);
1595 #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
1596 #define WF_FORK 0x02 /* child wakeup after fork */
1597 #define WF_MIGRATED 0x4 /* internal use, task got migrated */
1600 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1601 * of tasks with abnormal "nice" values across CPUs, the contribution that
1602 * each task makes to its run queue's load is weighted according to its
1603 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1604 * scaled version of the new time slice allocation that they receive on time
1608 #define WEIGHT_IDLEPRIO 3
1609 #define WMULT_IDLEPRIO 1431655765
1612 * Nice levels are multiplicative, with a gentle 10% change for every
1613 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1614 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1615 * that remained on nice 0.
1617 * The "10% effect" is relative and cumulative: from _any_ nice level,
1618 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1619 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1620 * If a task goes up by ~10% and another task goes down by ~10% then
1621 * the relative distance between them is ~25%.)
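 *
 * Worked example (an illustrative addition, not part of the original
 * text): two CPU-bound tasks at nice 0 and nice 1 have weights 1024 and
 * 820, so they receive 1024/1844 ~= 55% and 820/1844 ~= 45% of the CPU --
 * roughly the "10% effect" described above.  Adjacent entries in the
 * table below differ by a factor of about 1.25.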
1623 static const int prio_to_weight[40] = {
1624 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1625 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1626 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1627 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1628 /* 0 */ 1024, 820, 655, 526, 423,
1629 /* 5 */ 335, 272, 215, 172, 137,
1630 /* 10 */ 110, 87, 70, 56, 45,
1631 /* 15 */ 36, 29, 23, 18, 15,
1635 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1637 * In cases where the weight does not change often, we can use the
1638 * precalculated inverse to speed up arithmetic by turning divisions
1639 * into multiplications:
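 *
 * Worked example (an illustrative addition): for nice 0 the weight is
 * 1024 and the precalculated inverse is 4194304 == 2^32 / 1024, so
 * "delta / 1024" can be computed as "(delta * 4194304) >> 32".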
1641 static const u32 prio_to_wmult[40] = {
1642 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1643 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1644 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1645 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1646 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1647 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1648 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1649 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
1652 #define ENQUEUE_WAKEUP 0x01
1653 #define ENQUEUE_HEAD 0x02
1655 #define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */
1657 #define ENQUEUE_WAKING 0x00
1659 #define ENQUEUE_REPLENISH 0x08
1660 #define ENQUEUE_RESTORE 0x10
1662 #define DEQUEUE_SLEEP 0x01
1663 #define DEQUEUE_SAVE 0x02
1665 #define RETRY_TASK ((void *)-1UL)
1667 struct sched_class {
1668 const struct sched_class *next;
1670 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1671 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1672 void (*yield_task) (struct rq *rq);
1673 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1675 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1678 * It is the responsibility of the pick_next_task() method that will
1679 * return the next task to call put_prev_task() on the @prev task or
1680 * something equivalent.
1682 * May return RETRY_TASK when it finds a higher prio class has runnable
1685 struct task_struct * (*pick_next_task) (struct rq *rq,
1686 struct task_struct *prev);
1687 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1690 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1691 void (*migrate_task_rq)(struct task_struct *p);
1693 void (*task_waking) (struct task_struct *task);
1694 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1696 void (*set_cpus_allowed)(struct task_struct *p,
1697 const struct cpumask *newmask);
1699 void (*rq_online)(struct rq *rq);
1700 void (*rq_offline)(struct rq *rq);
1703 void (*set_curr_task) (struct rq *rq);
1704 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1705 void (*task_fork) (struct task_struct *p);
1706 void (*task_dead) (struct task_struct *p);
1709 * The switched_from() call is allowed to drop rq->lock, therefore we
1710 * cannot assume the switched_from/switched_to pair is serialized by
1711 * rq->lock. They are however serialized by p->pi_lock.
1713 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1714 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1715 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1718 unsigned int (*get_rr_interval) (struct rq *rq,
1719 struct task_struct *task);
1721 void (*update_curr) (struct rq *rq);
1723 #ifdef CONFIG_FAIR_GROUP_SCHED
1724 void (*task_move_group) (struct task_struct *p);
1726 #ifdef CONFIG_SCHED_HMP
1727 void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
1728 void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
1729 void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
1734 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1736 prev->sched_class->put_prev_task(rq, prev);
1739 #define sched_class_highest (&stop_sched_class)
1740 #define for_each_class(class) \
1741 for (class = sched_class_highest; class; class = class->next)
1743 extern const struct sched_class stop_sched_class;
1744 extern const struct sched_class dl_sched_class;
1745 extern const struct sched_class rt_sched_class;
1746 extern const struct sched_class fair_sched_class;
1747 extern const struct sched_class idle_sched_class;
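/*
 * Illustrative sketch (an addition, not part of the original header):
 * iterating the scheduling classes in priority order with the macro
 * defined above, the way the core pick-next and balancing paths do.
 */
#if 0
static const struct sched_class *example_find_class_with_hook(void)
{
	const struct sched_class *class;

	for_each_class(class) {
		/* classes are visited from stop (highest) down to idle */
		if (class->task_woken)
			return class;
	}
	return NULL;
}
#endif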
1752 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1754 extern void trigger_load_balance(struct rq *rq);
1756 extern void idle_enter_fair(struct rq *this_rq);
1757 extern void idle_exit_fair(struct rq *this_rq);
1759 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1763 static inline void idle_enter_fair(struct rq *rq) { }
1764 static inline void idle_exit_fair(struct rq *rq) { }
1768 #ifdef CONFIG_CPU_IDLE
1769 static inline void idle_set_state(struct rq *rq,
1770 struct cpuidle_state *idle_state)
1772 rq->idle_state = idle_state;
1775 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1777 WARN_ON(!rcu_read_lock_held());
1778 return rq->idle_state;
1781 static inline void idle_set_state(struct rq *rq,
1782 struct cpuidle_state *idle_state)
1786 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1792 #ifdef CONFIG_SYSRQ_SCHED_DEBUG
1793 extern void sysrq_sched_debug_show(void);
1795 extern void sched_init_granularity(void);
1796 extern void update_max_interval(void);
1798 extern void init_sched_dl_class(void);
1799 extern void init_sched_rt_class(void);
1800 extern void init_sched_fair_class(void);
1802 extern void resched_curr(struct rq *rq);
1803 extern void resched_cpu(int cpu);
1805 extern struct rt_bandwidth def_rt_bandwidth;
1806 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1808 extern struct dl_bandwidth def_dl_bandwidth;
1809 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1810 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1812 unsigned long to_ratio(u64 period, u64 runtime);
1814 extern void init_entity_runnable_average(struct sched_entity *se);
1816 static inline void add_nr_running(struct rq *rq, unsigned count)
1818 unsigned prev_nr = rq->nr_running;
1820 sched_update_nr_prod(cpu_of(rq), count, true);
1821 rq->nr_running = prev_nr + count;
1823 if (prev_nr < 2 && rq->nr_running >= 2) {
1825 if (!rq->rd->overload)
1826 rq->rd->overload = true;
1829 #ifdef CONFIG_NO_HZ_FULL
1830 if (tick_nohz_full_cpu(rq->cpu)) {
1832 * Tick is needed if more than one task runs on a CPU.
1833 * Send the target an IPI to kick it out of nohz mode.
1835 * We assume that the IPI implies a full memory barrier and that the
1836 * new value of rq->nr_running is visible on reception
1839 tick_nohz_full_kick_cpu(rq->cpu);
1845 static inline void sub_nr_running(struct rq *rq, unsigned count)
1847 sched_update_nr_prod(cpu_of(rq), count, false);
1848 rq->nr_running -= count;
1851 static inline void rq_last_tick_reset(struct rq *rq)
1853 #ifdef CONFIG_NO_HZ_FULL
1854 rq->last_sched_tick = jiffies;
1858 extern void update_rq_clock(struct rq *rq);
1860 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1861 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1863 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1865 extern const_debug unsigned int sysctl_sched_time_avg;
1866 extern const_debug unsigned int sysctl_sched_nr_migrate;
1867 extern const_debug unsigned int sysctl_sched_migration_cost;
1869 static inline u64 sched_avg_period(void)
1871 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1874 #ifdef CONFIG_SCHED_HRTICK
1878 * - enabled by features
1879 * - hrtimer is actually high res
1881 static inline int hrtick_enabled(struct rq *rq)
1883 if (!sched_feat(HRTICK))
1885 if (!cpu_active(cpu_of(rq)))
1887 return hrtimer_is_hres_active(&rq->hrtick_timer);
1890 void hrtick_start(struct rq *rq, u64 delay);
1894 static inline int hrtick_enabled(struct rq *rq)
1899 #endif /* CONFIG_SCHED_HRTICK */
1902 extern void sched_avg_update(struct rq *rq);
1904 #ifndef arch_scale_freq_capacity
1905 static __always_inline
1906 unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1908 return SCHED_CAPACITY_SCALE;
1912 #ifndef arch_scale_cpu_capacity
1913 static __always_inline
1914 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1916 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
1917 return sd->smt_gain / sd->span_weight;
1919 return SCHED_CAPACITY_SCALE;
1923 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1925 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
1926 sched_avg_update(rq);
1929 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1930 static inline void sched_avg_update(struct rq *rq) { }
1934 * __task_rq_lock - lock the rq @p resides on.
1936 static inline struct rq *__task_rq_lock(struct task_struct *p)
1937 __acquires(rq->lock)
1941 lockdep_assert_held(&p->pi_lock);
1945 raw_spin_lock(&rq->lock);
1946 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1947 lockdep_pin_lock(&rq->lock);
1950 raw_spin_unlock(&rq->lock);
1952 while (unlikely(task_on_rq_migrating(p)))
1958 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
1960 static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1961 __acquires(p->pi_lock)
1962 __acquires(rq->lock)
1967 raw_spin_lock_irqsave(&p->pi_lock, *flags);
1969 raw_spin_lock(&rq->lock);
1971 * move_queued_task() task_rq_lock()
1973 * ACQUIRE (rq->lock)
1974 * [S] ->on_rq = MIGRATING [L] rq = task_rq()
1975 * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
1976 * [S] ->cpu = new_cpu [L] task_rq()
1978 * RELEASE (rq->lock)
1980 * If we observe the old cpu in task_rq_lock, the acquire of
1981 * the old rq->lock will fully serialize against the stores.
1983 * If we observe the new cpu in task_rq_lock, the acquire will
1984 * pair with the WMB to ensure we must then also see migrating.
1986 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1987 lockdep_pin_lock(&rq->lock);
1990 raw_spin_unlock(&rq->lock);
1991 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
1993 while (unlikely(task_on_rq_migrating(p)))
1998 static inline void __task_rq_unlock(struct rq *rq)
1999 __releases(rq->lock)
2001 lockdep_unpin_lock(&rq->lock);
2002 raw_spin_unlock(&rq->lock);
2006 task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
2007 __releases(rq->lock)
2008 __releases(p->pi_lock)
2010 lockdep_unpin_lock(&rq->lock);
2011 raw_spin_unlock(&rq->lock);
2012 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
2016 #ifdef CONFIG_PREEMPT
2018 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
2021 * fair double_lock_balance: Safely acquires both rq->locks in a fair
2022 * way at the expense of forcing extra atomic operations in all
2023 * invocations. This assures that the double_lock is acquired using the
2024 * same underlying policy as the spinlock_t on this architecture, which
2025 * reduces latency compared to the unfair variant below. However, it
2026 * also adds more overhead and therefore may reduce throughput.
2028 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2029 __releases(this_rq->lock)
2030 __acquires(busiest->lock)
2031 __acquires(this_rq->lock)
2033 raw_spin_unlock(&this_rq->lock);
2034 double_rq_lock(this_rq, busiest);
2041 * Unfair double_lock_balance: Optimizes throughput at the expense of
2042 * latency by eliminating extra atomic operations when the locks are
2043 * already in proper order on entry. This favors lower cpu-ids and will
2044 * grant the double lock to lower cpus over higher ids under contention,
2045 * regardless of entry order into the function.
2047 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2048 __releases(this_rq->lock)
2049 __acquires(busiest->lock)
2050 __acquires(this_rq->lock)
2054 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2055 if (busiest < this_rq) {
2056 raw_spin_unlock(&this_rq->lock);
2057 raw_spin_lock(&busiest->lock);
2058 raw_spin_lock_nested(&this_rq->lock,
2059 SINGLE_DEPTH_NESTING);
2062 raw_spin_lock_nested(&busiest->lock,
2063 SINGLE_DEPTH_NESTING);
2068 #endif /* CONFIG_PREEMPT */
2071 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2073 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2075 if (unlikely(!irqs_disabled())) {
2076 /* printk() doesn't work well under rq->lock */
2077 raw_spin_unlock(&this_rq->lock);
2081 return _double_lock_balance(this_rq, busiest);
2084 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2085 __releases(busiest->lock)
2087 raw_spin_unlock(&busiest->lock);
2088 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2091 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2097 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2100 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2106 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2109 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2115 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2119 * double_rq_lock - safely lock two runqueues
2121 * Note this does not disable interrupts like task_rq_lock,
2122 * you need to do so manually before calling.
2124 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2125 __acquires(rq1->lock)
2126 __acquires(rq2->lock)
2128 BUG_ON(!irqs_disabled());
2130 raw_spin_lock(&rq1->lock);
2131 __acquire(rq2->lock); /* Fake it out ;) */
2134 raw_spin_lock(&rq1->lock);
2135 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2137 raw_spin_lock(&rq2->lock);
2138 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2144 * double_rq_unlock - safely unlock two runqueues
2146 * Note this does not restore interrupts like task_rq_unlock,
2147 * you need to do so manually after calling.
2149 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2150 __releases(rq1->lock)
2151 __releases(rq2->lock)
2153 raw_spin_unlock(&rq1->lock);
2155 raw_spin_unlock(&rq2->lock);
2157 __release(rq2->lock);
2160 #else /* CONFIG_SMP */
2163 * double_rq_lock - safely lock two runqueues
2165 * Note this does not disable interrupts like task_rq_lock,
2166 * you need to do so manually before calling.
2168 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2169 __acquires(rq1->lock)
2170 __acquires(rq2->lock)
2172 BUG_ON(!irqs_disabled());
2174 raw_spin_lock(&rq1->lock);
2175 __acquire(rq2->lock); /* Fake it out ;) */
2179 * double_rq_unlock - safely unlock two runqueues
2181 * Note this does not restore interrupts like task_rq_unlock,
2182 * you need to do so manually after calling.
2184 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2185 __releases(rq1->lock)
2186 __releases(rq2->lock)
2189 raw_spin_unlock(&rq1->lock);
2190 __release(rq2->lock);
2195 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2196 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2198 #ifdef CONFIG_SCHED_DEBUG
2199 extern void print_cfs_stats(struct seq_file *m, int cpu);
2200 extern void print_rt_stats(struct seq_file *m, int cpu);
2201 extern void print_dl_stats(struct seq_file *m, int cpu);
2203 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2205 #ifdef CONFIG_NUMA_BALANCING
2207 show_numa_stats(struct task_struct *p, struct seq_file *m);
2209 print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2210 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2211 #endif /* CONFIG_NUMA_BALANCING */
2212 #endif /* CONFIG_SCHED_DEBUG */
2214 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2215 extern void init_rt_rq(struct rt_rq *rt_rq);
2216 extern void init_dl_rq(struct dl_rq *dl_rq);
2218 extern void cfs_bandwidth_usage_inc(void);
2219 extern void cfs_bandwidth_usage_dec(void);
2221 #ifdef CONFIG_NO_HZ_COMMON
2222 enum rq_nohz_flag_bits {
2227 #define NOHZ_KICK_ANY 0
2228 #define NOHZ_KICK_RESTRICT 1
2230 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2233 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2235 DECLARE_PER_CPU(u64, cpu_hardirq_time);
2236 DECLARE_PER_CPU(u64, cpu_softirq_time);
2238 #ifndef CONFIG_64BIT
2239 DECLARE_PER_CPU(seqcount_t, irq_time_seq);
2241 static inline void irq_time_write_begin(void)
2243 __this_cpu_inc(irq_time_seq.sequence);
2247 static inline void irq_time_write_end(void)
2250 __this_cpu_inc(irq_time_seq.sequence);
2253 static inline u64 irq_time_read(int cpu)
2259 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
2260 irq_time = per_cpu(cpu_softirq_time, cpu) +
2261 per_cpu(cpu_hardirq_time, cpu);
2262 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
2266 #else /* CONFIG_64BIT */
2267 static inline void irq_time_write_begin(void)
2271 static inline void irq_time_write_end(void)
2275 static inline u64 irq_time_read(int cpu)
2277 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
2279 #endif /* CONFIG_64BIT */
2280 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */