#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>
#include <linux/nodemask.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/topology.h>
#include <linux/magic.h>

#include <asm/current.h>
/*
 * Short-hand aliases mapping timer-expiry names onto task accounting
 * fields (presumably used by the CPU-timer expiry code — verify at the
 * call sites; the target fields are not visible in this chunk).
 */
#define prof_exp stime
#define sched_exp sum_exec_runtime
/*
 * Run-queue accounting for a task. The members are compiled in only
 * when CONFIG_SCHED_INFO is set, but the struct itself is always
 * defined so that embedding code needs no #ifdef guards of its own.
 * (Unresolved diff markers left two conflicting layouts here; the
 * guard-inside-struct variant marked '+' is the one kept.)
 */
struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* cumulative counters */
	unsigned long pcount;		/* # of times run on this cpu */
	unsigned long long run_delay;	/* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#endif /* CONFIG_SCHED_INFO */
};
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: this defines the basic fixed-point range those metrics
 * are expressed in. (The original comment was never closed with '*'/'/',
 * which would have commented out everything that follows.)
 */
# define SCHED_FIXEDPOINT_SHIFT	10
# define SCHED_FIXEDPOINT_SCALE	(1L << SCHED_FIXEDPOINT_SHIFT)
/*
 * Optional arch hook to prefetch the stack of the next task; a no-op
 * inline where the architecture does not provide an implementation.
 * NOTE(review): the diff markers suggested this helper was being
 * removed upstream; kept here until it is confirmed no callers remain.
 */
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
/*
 * Load weight of a schedulable entity, as consumed by load-balancing.
 * Field order is ABI for everything embedding this struct — do not
 * reorder.
 */
struct load_weight {
	unsigned long weight;			/* scaled load weight */
	u32 inv_weight;				/* presumably a cached inverse of weight for division-free scaling — confirm */
	unsigned long load_avg, util_avg;	/* NOTE(review): tracked averages; upstream usually keeps these in a separate sched_avg — confirm placement */
};
/*
 * Schedstats counters for a scheduling entity. The members exist only
 * under CONFIG_SCHEDSTATS, but the struct is always defined so it can
 * be embedded unconditionally (matching the sched_info pattern).
 * (Unresolved diff markers resolved in favour of the '+' variant that
 * puts the guard inside the struct.)
 */
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64	wait_start;
	u64	wait_max;
	u64	wait_count;
	u64	nr_wakeups_affine_attempts;
	u64	nr_wakeups_passive;
	u64	nr_wakeups_idle;
#endif
};
struct sched_entity {
struct load_weight load; /* for load-balancing */
u64 nr_migrations;
-#ifdef CONFIG_SCHEDSTATS
struct sched_statistics statistics;
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
-#ifdef CONFIG_SCHED_INFO
struct sched_info sched_info;
-#endif
struct list_head tasks;
#ifdef CONFIG_SMP
#endif
}
/*
 * Return the NUMA node of the CPU this task is currently associated
 * with (via task_cpu() -> cpu_to_node()).
 * NOTE(review): the diff markers suggested this helper was being
 * removed or relocated upstream; kept here until callers are audited.
 */
static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else