#include <uapi/linux/sched.h>
#include <linux/sched/prio.h>
+#include <linux/nodemask.h>
-#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types_task.h>
-#include <asm/ptrace.h>
#include <linux/sem.h>
#include <linux/shm.h>
-#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
-#include <linux/rculist.h>
-#include <linux/rtmutex.h>
+#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
-#include <linux/cred.h>
-#include <linux/gfp.h>
-#include <linux/topology.h>
-#include <linux/magic.h>
-#include <linux/cgroup-defs.h>
#include <asm/current.h>
extern cpumask_var_t cpu_isolated_map;
-extern int runqueue_is_locked(int cpu);
-
extern void scheduler_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
#define prof_exp stime
#define sched_exp sum_exec_runtime
-#include <linux/rwsem.h>
-
-#ifdef CONFIG_SCHED_INFO
/*
 * Per-task scheduler bookkeeping: run/wait counters and queueing
 * timestamps, maintained when CONFIG_SCHED_INFO is enabled.
 *
 * NOTE(review): the '+'/'-' prefixed lines below are leftover unified-diff
 * markers. After applying them, the CONFIG_SCHED_INFO guard moves from
 * around the struct to inside it, so 'struct sched_info' is always
 * defined (possibly empty) and embedding it in task_struct needs no
 * ifdef at the use site — confirm against the intended patch.
 */
struct sched_info {
+#ifdef CONFIG_SCHED_INFO
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
-};
#endif /* CONFIG_SCHED_INFO */
+};
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
-#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct *t);
-#else
-static inline void prefetch_stack(struct task_struct *t) { }
-#endif
-
/*
 * Scheduler load weight for a runnable entity.
 *
 * 'weight' is the entity's load weight; 'inv_weight' is presumably a
 * cached fixed-point inverse of 'weight' used to avoid divisions in the
 * hot path — TODO confirm against sched/fair.
 *
 * NOTE(review): 'load_avg'/'util_avg' look like tracked load/utilization
 * averages; upstream keeps those in a separate struct sched_avg, so
 * verify this combined layout is intentional.
 */
struct load_weight {
unsigned long weight;
u32 inv_weight;
unsigned long load_avg, util_avg;
};
-#ifdef CONFIG_SCHEDSTATS
/*
 * Per-entity scheduler statistics (wait times and wakeup counters),
 * populated when CONFIG_SCHEDSTATS is enabled.
 *
 * NOTE(review): the '+'/'-' prefixed lines are leftover unified-diff
 * markers. After applying them, the CONFIG_SCHEDSTATS guard moves
 * inside the struct, so the type is always defined (possibly empty)
 * and its embedding in sched_entity needs no ifdef — confirm against
 * the intended patch.
 */
struct sched_statistics {
+#ifdef CONFIG_SCHEDSTATS
u64 wait_start; /* when the entity last started waiting */
u64 wait_max; /* longest observed runqueue wait */
u64 wait_count; /* number of waits accumulated */
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
-};
#endif
+};
struct sched_entity {
struct load_weight load; /* for load-balancing */
u64 nr_migrations;
-#ifdef CONFIG_SCHEDSTATS
struct sched_statistics statistics;
-#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct wake_q_node *next;
};
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
- /*
- * Each bit set is a CPU that potentially has a TLB entry for one of
- * the PFNs being flushed. See set_tlb_ubc_flush_pending().
- */
- struct cpumask cpumask;
-
- /* True if any bit in cpumask is set */
- bool flush_required;
-
- /*
- * If true then the PTE was dirty when unmapped. The entry must be
- * flushed before IO is initiated or a stale TLB entry potentially
- * allows an update without redirtying the page.
- */
- bool writable;
-};
-
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
-#ifdef CONFIG_SCHED_INFO
struct sched_info sched_info;
-#endif
struct list_head tasks;
#ifdef CONFIG_SMP
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
struct tlbflush_unmap_batch tlb_ubc;
-#endif
struct rcu_head rcu;
extern struct pid *cad_pid;
-extern void free_task(struct task_struct *tsk);
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-extern void __put_task_struct(struct task_struct *t);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
-}
-
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
-struct task_struct *try_get_task_struct(struct task_struct **ptask);
-
/*
* Per process flags
*/
#define cpu_relax_yield() cpu_relax()
#endif
-/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
-extern void sched_exec(void);
-#else
-#define sched_exec() {}
-#endif
-
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
-extern struct pid_namespace init_pid_ns;
-
/*
* find a task by one of its numerical ids
*
#endif
}
-static inline int task_node(const struct task_struct *p)
-{
- return cpu_to_node(task_cpu(p));
-}
-
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-extern int task_can_switch_user(struct user_struct *up,
- struct task_struct *tsk);
-
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif