-#endif /* !CONFIG_SMP */
-
-
-struct io_context; /* See blkdev.h */
-
-
-#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct *t);
-#else
-static inline void prefetch_stack(struct task_struct *t) { }
-#endif
-
-struct audit_context; /* See audit.c */
-struct mempolicy;
-struct pipe_inode_info;
-struct uts_namespace;
-
-struct load_weight {
- unsigned long weight;
- u32 inv_weight;
-};
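-
-/*
- * inv_weight caches a precomputed reciprocal of weight (roughly
- * 2^32 / weight; see __update_inv_weight() in kernel/sched/fair.c),
- * so scaling by a weight takes a multiply and a shift instead of a
- * division on every update.
- */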
-
-/*
- * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
- *
- * [load_avg definition]
- *
- * load_avg = runnable% * scale_load_down(load)
- *
- * where runnable% is the time ratio that a sched_entity is runnable.
- * For cfs_rq, it is the aggregated load_avg of all runnable and
- * blocked sched_entities.
- *
- * load_avg may also take frequency scaling into account:
- *
- * load_avg = runnable% * scale_load_down(load) * freq%
- *
- * where freq% is the CPU frequency normalized to the highest frequency.
- *
- * [util_avg definition]
- *
- * util_avg = running% * SCHED_CAPACITY_SCALE
- *
- * where running% is the time ratio that a sched_entity is running on
- * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
- * and blocked sched_entities.
- *
- * util_avg may also factor frequency scaling and CPU capacity scaling:
- *
- * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
- *
- * where freq% is the same as above, and capacity% is the CPU capacity
- * normalized to the greatest capacity (due to uarch differences, etc).
- *
- * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
- * themselves are in the range of [0, 1]. To do fixed point arithmetic,
- * we therefore scale them up to as large a range as necessary. This is
- * for example reflected by util_avg's SCHED_CAPACITY_SCALE.
- *
- * [Overflow issue]
- *
- * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
- * with the highest load (=88761), always runnable on a single cfs_rq,
- * and should not overflow as the number already hits PID_MAX_LIMIT.
- *
- * For all other cases (including 32-bit kernels), struct load_weight's
- * weight will overflow first before we do, because:
- *
- * Max(load_avg) <= Max(load.weight)
- *
- * Then it is the load_weight's responsibility to consider overflow
- * issues.
- */
-struct sched_avg {
- u64 last_update_time, load_sum;
- u32 util_sum, period_contrib;
- unsigned long load_avg, util_avg;
-};
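-
-/*
- * Worked example of the definitions above (illustrative): a task that
- * is running 50% of the time on a max-capacity CPU at its highest
- * frequency converges to util_avg ~= 0.5 * SCHED_CAPACITY_SCALE = 512
- * (with the usual scale of 1024).
- */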
-
-#ifdef CONFIG_SCHEDSTATS
-struct sched_statistics {
- u64 wait_start;
- u64 wait_max;
- u64 wait_count;
- u64 wait_sum;
- u64 iowait_count;
- u64 iowait_sum;
-
- u64 sleep_start;
- u64 sleep_max;
- s64 sum_sleep_runtime;
-
- u64 block_start;
- u64 block_max;
- u64 exec_max;
- u64 slice_max;
-
- u64 nr_migrations_cold;
- u64 nr_failed_migrations_affine;
- u64 nr_failed_migrations_running;
- u64 nr_failed_migrations_hot;
- u64 nr_forced_migrations;
-
- u64 nr_wakeups;
- u64 nr_wakeups_sync;
- u64 nr_wakeups_migrate;
- u64 nr_wakeups_local;
- u64 nr_wakeups_remote;
- u64 nr_wakeups_affine;
- u64 nr_wakeups_affine_attempts;
- u64 nr_wakeups_passive;
- u64 nr_wakeups_idle;
-};
-#endif
-
-struct sched_entity {
- struct load_weight load; /* for load-balancing */
- struct rb_node run_node;
- struct list_head group_node;
- unsigned int on_rq;
-
- u64 exec_start;
- u64 sum_exec_runtime;
- u64 vruntime;
- u64 prev_sum_exec_runtime;
-
- u64 nr_migrations;
-
-#ifdef CONFIG_SCHEDSTATS
- struct sched_statistics statistics;
-#endif
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
- int depth;
- struct sched_entity *parent;
- /* rq on which this entity is (to be) queued: */
- struct cfs_rq *cfs_rq;
- /* rq "owned" by this entity/group: */
- struct cfs_rq *my_q;
-#endif
-
-#ifdef CONFIG_SMP
- /*
- * Per entity load average tracking.
- *
- * Put into separate cache line so it does not
- * collide with read-mostly values above.
- */
- struct sched_avg avg ____cacheline_aligned_in_smp;
-#endif
-};
-
-struct sched_rt_entity {
- struct list_head run_list;
- unsigned long timeout;
- unsigned long watchdog_stamp;
- unsigned int time_slice;
- unsigned short on_rq;
- unsigned short on_list;
-
- struct sched_rt_entity *back;
-#ifdef CONFIG_RT_GROUP_SCHED
- struct sched_rt_entity *parent;
- /* rq on which this entity is (to be) queued: */
- struct rt_rq *rt_rq;
- /* rq "owned" by this entity/group: */
- struct rt_rq *my_q;
-#endif
-};
-
-struct sched_dl_entity {
- struct rb_node rb_node;
-
- /*
- * Original scheduling parameters. Copied here from sched_attr
- * during sched_setattr(), they will remain the same until
- * the next sched_setattr().
- */
- u64 dl_runtime; /* maximum runtime for each instance */
- u64 dl_deadline; /* relative deadline of each instance */
- u64 dl_period; /* separation of two instances (period) */
- u64 dl_bw; /* dl_runtime / dl_deadline */
-
- /*
- * Actual scheduling parameters. Initialized with the values above,
-	 * they are continuously updated during task execution. Note that
- * the remaining runtime could be < 0 in case we are in overrun.
- */
- s64 runtime; /* remaining runtime for this instance */
- u64 deadline; /* absolute deadline for this instance */
- unsigned int flags; /* specifying the scheduler behaviour */
-
- /*
- * Some bool flags:
- *
- * @dl_throttled tells if we exhausted the runtime. If so, the
- * task has to wait for a replenishment to be performed at the
- * next firing of dl_timer.
- *
-	 * @dl_boosted tells if we are boosted due to PI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
-	 * @dl_yielded tells if the task gave up the CPU before consuming
- * all its available runtime during the last job.
- */
- int dl_throttled, dl_boosted, dl_yielded;
-
- /*
- * Bandwidth enforcement timer. Each -deadline task has its
- * own bandwidth to be enforced, thus we need one timer per task.
- */
- struct hrtimer dl_timer;
-};
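-
-/*
- * Sketch of how dl_bw above is derived from the two user-supplied
- * parameters, assuming the scheduler's usual 20-bit fixed-point scale
- * (as in to_ratio()); illustrative only, not part of this header:
- *
- *	static inline u64 dl_entity_bw(u64 dl_runtime, u64 dl_deadline)
- *	{
- *		return div64_u64(dl_runtime << 20, dl_deadline);
- *	}
- */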
-
-union rcu_special {
- struct {
- u8 blocked;
- u8 need_qs;
- u8 exp_need_qs;
- u8 pad; /* Otherwise the compiler can store garbage here. */
- } b; /* Bits. */
- u32 s; /* Set of bits. */
-};
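-
-/*
- * The union lets all of the byte flags above be read or written as a
- * single 32-bit quantity; e.g. rcu_copy_process() below resets them
- * all at once with "p->rcu_read_unlock_special.s = 0;".
- */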
-struct rcu_node;
-
-enum perf_event_task_context {
- perf_invalid_context = -1,
- perf_hw_context = 0,
- perf_sw_context,
- perf_nr_task_contexts,
-};
-
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
- /*
- * Each bit set is a CPU that potentially has a TLB entry for one of
- * the PFNs being flushed. See set_tlb_ubc_flush_pending().
- */
- struct cpumask cpumask;
-
- /* True if any bit in cpumask is set */
- bool flush_required;
-
- /*
- * If true then the PTE was dirty when unmapped. The entry must be
- * flushed before IO is initiated or a stale TLB entry potentially
- * allows an update without redirtying the page.
- */
- bool writable;
-};
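-
-/*
- * Usage sketch, modeled on set_tlb_ubc_flush_pending() in mm/rmap.c
- * (illustrative; ubc, mm and pte_dirty are hypothetical locals):
- *
- *	cpumask_or(&ubc->cpumask, &ubc->cpumask, mm_cpumask(mm));
- *	ubc->flush_required = true;
- *	ubc->writable = ubc->writable || pte_dirty;
- */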
-
-struct task_struct {
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /*
- * For reasons of header soup (see current_thread_info()), this
- * must be the first element of task_struct.
- */
- struct thread_info thread_info;
-#endif
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
- unsigned int ptrace;
-
-#ifdef CONFIG_SMP
- struct llist_node wake_entry;
- int on_cpu;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- unsigned int cpu; /* current CPU */
-#endif
- unsigned int wakee_flips;
- unsigned long wakee_flip_decay_ts;
- struct task_struct *last_wakee;
-
- int wake_cpu;
-#endif
- int on_rq;
-
- int prio, static_prio, normal_prio;
- unsigned int rt_priority;
- const struct sched_class *sched_class;
- struct sched_entity se;
- struct sched_rt_entity rt;
-#ifdef CONFIG_CGROUP_SCHED
- struct task_group *sched_task_group;
-#endif
- struct sched_dl_entity dl;
-
-#ifdef CONFIG_PREEMPT_NOTIFIERS
- /* list of struct preempt_notifier: */
- struct hlist_head preempt_notifiers;
-#endif
-
-#ifdef CONFIG_BLK_DEV_IO_TRACE
- unsigned int btrace_seq;
-#endif
-
- unsigned int policy;
- int nr_cpus_allowed;
- cpumask_t cpus_allowed;
-
-#ifdef CONFIG_PREEMPT_RCU
- int rcu_read_lock_nesting;
- union rcu_special rcu_read_unlock_special;
- struct list_head rcu_node_entry;
- struct rcu_node *rcu_blocked_node;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_TASKS_RCU
- unsigned long rcu_tasks_nvcsw;
- bool rcu_tasks_holdout;
- struct list_head rcu_tasks_holdout_list;
- int rcu_tasks_idle_cpu;
-#endif /* #ifdef CONFIG_TASKS_RCU */
-
-#ifdef CONFIG_SCHED_INFO
- struct sched_info sched_info;
-#endif
-
- struct list_head tasks;
-#ifdef CONFIG_SMP
- struct plist_node pushable_tasks;
- struct rb_node pushable_dl_tasks;
-#endif
-
- struct mm_struct *mm, *active_mm;
- /* per-thread vma caching */
- u32 vmacache_seqnum;
- struct vm_area_struct *vmacache[VMACACHE_SIZE];
-#if defined(SPLIT_RSS_COUNTING)
- struct task_rss_stat rss_stat;
-#endif
-/* task state */
- int exit_state;
- int exit_code, exit_signal;
- int pdeath_signal; /* The signal sent when the parent dies */
- unsigned long jobctl; /* JOBCTL_*, siglock protected */
-
- /* Used for emulating ABI behavior of previous Linux versions */
- unsigned int personality;
-
- /* scheduler bits, serialized by scheduler locks */
- unsigned sched_reset_on_fork:1;
- unsigned sched_contributes_to_load:1;
- unsigned sched_migrated:1;
- unsigned sched_remote_wakeup:1;
- unsigned :0; /* force alignment to the next boundary */
-
- /* unserialized, strictly 'current' */
- unsigned in_execve:1; /* bit to tell LSMs we're in execve */
- unsigned in_iowait:1;
-#if !defined(TIF_RESTORE_SIGMASK)
- unsigned restore_sigmask:1;
-#endif
-#ifdef CONFIG_MEMCG
- unsigned memcg_may_oom:1;
-#ifndef CONFIG_SLOB
- unsigned memcg_kmem_skip_account:1;
-#endif
-#endif
-#ifdef CONFIG_COMPAT_BRK
- unsigned brk_randomized:1;
-#endif
-
- unsigned long atomic_flags; /* Flags needing atomic access. */
-
- struct restart_block restart_block;
-
- pid_t pid;
- pid_t tgid;
-
-#ifdef CONFIG_CC_STACKPROTECTOR
- /* Canary value for the -fstack-protector gcc feature */
- unsigned long stack_canary;
-#endif
- /*
- * pointers to (original) parent process, youngest child, younger sibling,
- * older sibling, respectively. (p->father can be replaced with
- * p->real_parent->pid)
- */
- struct task_struct __rcu *real_parent; /* real parent process */
- struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
- /*
- * children/sibling forms the list of my natural children
- */
- struct list_head children; /* list of my children */
- struct list_head sibling; /* linkage in my parent's children list */
- struct task_struct *group_leader; /* threadgroup leader */
-
- /*
- * ptraced is the list of tasks this task is using ptrace on.
- * This includes both natural children and PTRACE_ATTACH targets.
- * p->ptrace_entry is p's link on the p->parent->ptraced list.
- */
- struct list_head ptraced;
- struct list_head ptrace_entry;
-
- /* PID/PID hash table linkage. */
- struct pid_link pids[PIDTYPE_MAX];
- struct list_head thread_group;
- struct list_head thread_node;
-
- struct completion *vfork_done; /* for vfork() */
- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
-
- u64 utime, stime;
-#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
- u64 utimescaled, stimescaled;
-#endif
- u64 gtime;
- struct prev_cputime prev_cputime;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqcount_t vtime_seqcount;
- unsigned long long vtime_snap;
- enum {
- /* Task is sleeping or running in a CPU with VTIME inactive */
- VTIME_INACTIVE = 0,
- /* Task runs in userspace in a CPU with VTIME active */
- VTIME_USER,
- /* Task runs in kernelspace in a CPU with VTIME active */
- VTIME_SYS,
- } vtime_snap_whence;
-#endif
-
-#ifdef CONFIG_NO_HZ_FULL
- atomic_t tick_dep_mask;
-#endif
- unsigned long nvcsw, nivcsw; /* context switch counts */
- u64 start_time; /* monotonic time in nsec */
- u64 real_start_time; /* boot based time in nsec */
-/* mm fault and swap info: this can arguably be seen as either
-   mm-specific or thread-specific */
- unsigned long min_flt, maj_flt;
-
-#ifdef CONFIG_POSIX_TIMERS
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
-#endif
-
-/* process credentials */
- const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
- const struct cred __rcu *real_cred; /* objective and real subjective task
- * credentials (COW) */
- const struct cred __rcu *cred; /* effective (overridable) subjective task
- * credentials (COW) */
- char comm[TASK_COMM_LEN]; /* executable name excluding path
- - access with [gs]et_task_comm (which lock
- it with task_lock())
- - initialized normally by setup_new_exec */
-/* file system info */
- struct nameidata *nameidata;
-#ifdef CONFIG_SYSVIPC
-/* ipc stuff */
- struct sysv_sem sysvsem;
- struct sysv_shm sysvshm;
-#endif
-#ifdef CONFIG_DETECT_HUNG_TASK
-/* hung task detection */
- unsigned long last_switch_count;
-#endif
-/* filesystem information */
- struct fs_struct *fs;
-/* open file information */
- struct files_struct *files;
-/* namespaces */
- struct nsproxy *nsproxy;
-/* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
- struct sigpending pending;
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
- unsigned sas_ss_flags;
-
- struct callback_head *task_works;
-
- struct audit_context *audit_context;
-#ifdef CONFIG_AUDITSYSCALL
- kuid_t loginuid;
- unsigned int sessionid;
-#endif
- struct seccomp seccomp;
-
-/* Thread group tracking */
- u32 parent_exec_id;
- u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
- * mempolicy */
- spinlock_t alloc_lock;
-
- /* Protection of the PI data structures: */
- raw_spinlock_t pi_lock;
-
- struct wake_q_node wake_q;
-
-#ifdef CONFIG_RT_MUTEXES
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
- /* Deadlock detection and priority inheritance handling */
- struct rt_mutex_waiter *pi_blocked_on;
-#endif
-
-#ifdef CONFIG_DEBUG_MUTEXES
- /* mutex deadlock detection */
- struct mutex_waiter *blocked_on;
-#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- int hardirqs_enabled;
- int hardirq_context;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
- int softirqs_enabled;
- int softirq_context;
-#endif
-#ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 48UL
- u64 curr_chain_key;
- int lockdep_depth;
- unsigned int lockdep_recursion;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
-#endif
-#ifdef CONFIG_UBSAN
- unsigned int in_ubsan;
-#endif
-
-/* journalling filesystem info */
- void *journal_info;
-
-/* stacked block device info */
- struct bio_list *bio_list;
-
-#ifdef CONFIG_BLOCK
-/* stack plugging */
- struct blk_plug *plug;
-#endif
-
-/* VM state */
- struct reclaim_state *reclaim_state;
-
- struct backing_dev_info *backing_dev_info;
-
- struct io_context *io_context;
-
- unsigned long ptrace_message;
- siginfo_t *last_siginfo; /* For ptrace use. */
- struct task_io_accounting ioac;
-#if defined(CONFIG_TASK_XACCT)
- u64 acct_rss_mem1; /* accumulated rss usage */
- u64 acct_vm_mem1; /* accumulated virtual memory usage */
- u64 acct_timexpd; /* stime + utime since last update */
-#endif
-#ifdef CONFIG_CPUSETS
- nodemask_t mems_allowed; /* Protected by alloc_lock */
-	seqcount_t mems_allowed_seq;	/* Sequence number to catch updates */
- int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
-#endif
-#ifdef CONFIG_CGROUPS
- /* Control Group info protected by css_set_lock */
- struct css_set __rcu *cgroups;
- /* cg_list protected by css_set_lock and tsk->alloc_lock */
- struct list_head cg_list;
-#endif
-#ifdef CONFIG_INTEL_RDT_A
- int closid;
-#endif
-#ifdef CONFIG_FUTEX
- struct robust_list_head __user *robust_list;
-#ifdef CONFIG_COMPAT
- struct compat_robust_list_head __user *compat_robust_list;
-#endif
- struct list_head pi_state_list;
- struct futex_pi_state *pi_state_cache;
-#endif
-#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
- struct mutex perf_event_mutex;
- struct list_head perf_event_list;
-#endif
-#ifdef CONFIG_DEBUG_PREEMPT
- unsigned long preempt_disable_ip;
-#endif
-#ifdef CONFIG_NUMA
- struct mempolicy *mempolicy; /* Protected by alloc_lock */
- short il_next;
- short pref_node_fork;
-#endif
-#ifdef CONFIG_NUMA_BALANCING
- int numa_scan_seq;
- unsigned int numa_scan_period;
- unsigned int numa_scan_period_max;
- int numa_preferred_nid;
- unsigned long numa_migrate_retry;
- u64 node_stamp; /* migration stamp */
- u64 last_task_numa_placement;
- u64 last_sum_exec_runtime;
- struct callback_head numa_work;
-
- struct list_head numa_entry;
- struct numa_group *numa_group;
-
- /*
- * numa_faults is an array split into four regions:
- * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
- * in this precise order.
- *
- * faults_memory: Exponential decaying average of faults on a per-node
- * basis. Scheduling placement decisions are made based on these
- * counts. The values remain static for the duration of a PTE scan.
- * faults_cpu: Track the nodes the process was running on when a NUMA
- * hinting fault was incurred.
- * faults_memory_buffer and faults_cpu_buffer: Record faults per node
- * during the current scan window. When the scan completes, the counts
- * in faults_memory and faults_cpu decay and these values are copied.
- */
- unsigned long *numa_faults;
- unsigned long total_numa_faults;
-
- /*
- * numa_faults_locality tracks if faults recorded during the last
- * scan window were remote/local or failed to migrate. The task scan
- * period is adapted based on the locality of the faults with different
-	 * weights depending on whether they were shared or private faults.
- */
- unsigned long numa_faults_locality[3];
-
- unsigned long numa_pages_migrated;
-#endif /* CONFIG_NUMA_BALANCING */
-
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- struct tlbflush_unmap_batch tlb_ubc;
-#endif
-
- struct rcu_head rcu;
-
- /*
- * cache last used pipe for splice
- */
- struct pipe_inode_info *splice_pipe;
-
- struct page_frag task_frag;
-
-#ifdef CONFIG_TASK_DELAY_ACCT
- struct task_delay_info *delays;
-#endif
-#ifdef CONFIG_FAULT_INJECTION
- int make_it_fail;
-#endif
- /*
- * when (nr_dirtied >= nr_dirtied_pause), it's time to call
- * balance_dirty_pages() for some dirty throttling pause
- */
- int nr_dirtied;
- int nr_dirtied_pause;
- unsigned long dirty_paused_when; /* start of a write-and-pause period */
-
-#ifdef CONFIG_LATENCYTOP
- int latency_record_count;
- struct latency_record latency_record[LT_SAVECOUNT];
-#endif
- /*
- * time slack values; these are used to round up poll() and
- * select() etc timeout values. These are in nanoseconds.
- */
- u64 timer_slack_ns;
- u64 default_timer_slack_ns;
-
-#ifdef CONFIG_KASAN
- unsigned int kasan_depth;
-#endif
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Index of current stored address in ret_stack */
- int curr_ret_stack;
- /* Stack of return addresses for return function tracing */
- struct ftrace_ret_stack *ret_stack;
- /* time stamp for last schedule */
- unsigned long long ftrace_timestamp;
- /*
- * Number of functions that haven't been traced
- * because of depth overrun.
- */
- atomic_t trace_overrun;
- /* Pause for the tracing */
- atomic_t tracing_graph_pause;
-#endif
-#ifdef CONFIG_TRACING
- /* state flags for use by tracers */
- unsigned long trace;
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
-#endif /* CONFIG_TRACING */
-#ifdef CONFIG_KCOV
- /* Coverage collection mode enabled for this task (0 if disabled). */
- enum kcov_mode kcov_mode;
- /* Size of the kcov_area. */
- unsigned kcov_size;
- /* Buffer for coverage collection. */
- void *kcov_area;
-	/* kcov descriptor wired with this task or NULL. */
- struct kcov *kcov;
-#endif
-#ifdef CONFIG_MEMCG
- struct mem_cgroup *memcg_in_oom;
- gfp_t memcg_oom_gfp_mask;
- int memcg_oom_order;
-
- /* number of pages to reclaim on returning to userland */
- unsigned int memcg_nr_pages_over_high;
-#endif
-#ifdef CONFIG_UPROBES
- struct uprobe_task *utask;
-#endif
-#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
-#endif
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
-#endif
- int pagefault_disabled;
-#ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
-#endif
-#ifdef CONFIG_VMAP_STACK
- struct vm_struct *stack_vm_area;
-#endif
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* A live task holds one reference. */
- atomic_t stack_refcount;
-#endif
-/* CPU-specific state of this task */
- struct thread_struct thread;
-/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
- */
-};
-
-#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
-extern int arch_task_struct_size __read_mostly;
-#else
-# define arch_task_struct_size (sizeof(struct task_struct))
-#endif
-
-#ifdef CONFIG_VMAP_STACK
-static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
-{
- return t->stack_vm_area;
-}
-#else
-static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
-{
- return NULL;
-}
-#endif
-
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
-static inline int tsk_nr_cpus_allowed(struct task_struct *p)
-{
- return p->nr_cpus_allowed;
-}
-
-#define TNF_MIGRATED 0x01
-#define TNF_NO_GROUP 0x02
-#define TNF_SHARED 0x04
-#define TNF_FAULT_LOCAL 0x08
-#define TNF_MIGRATE_FAIL 0x10
-
-static inline bool in_vfork(struct task_struct *tsk)
-{
- bool ret;
-
- /*
- * need RCU to access ->real_parent if CLONE_VM was used along with
- * CLONE_PARENT.
- *
- * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
- * imply CLONE_VM
- *
- * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
- * ->real_parent is not necessarily the task doing vfork(), so in
- * theory we can't rely on task_lock() if we want to dereference it.
- *
- * And in this case we can't trust the real_parent->mm == tsk->mm
-	 * check, it can be a false negative. But we do not care; if init or
- * another oom-unkillable task does this it should blame itself.
- */
- rcu_read_lock();
- ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
- rcu_read_unlock();
-
- return ret;
-}
-
-#ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int last_node, int node, int pages, int flags);
-extern pid_t task_numa_group_id(struct task_struct *p);
-extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
- int src_nid, int dst_cpu);
-#else
-static inline void task_numa_fault(int last_node, int node, int pages,
- int flags)
-{
-}
-static inline pid_t task_numa_group_id(struct task_struct *p)
-{
- return 0;
-}
-static inline void set_numabalancing_state(bool enabled)
-{
-}
-static inline void task_numa_free(struct task_struct *p)
-{
-}
-static inline bool should_numa_migrate_memory(struct task_struct *p,
- struct page *page, int src_nid, int dst_cpu)
-{
- return true;
-}
-#endif
-
-static inline struct pid *task_pid(struct task_struct *task)
-{
- return task->pids[PIDTYPE_PID].pid;
-}
-
-static inline struct pid *task_tgid(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PID].pid;
-}
-
-/*
- * Without tasklist or rcu lock it is not safe to dereference
- * the result of task_pgrp/task_session even if task == current;
- * we can race with another thread doing sys_setsid/sys_setpgid.
- */
-static inline struct pid *task_pgrp(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PGID].pid;
-}
-
-static inline struct pid *task_session(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_SID].pid;
-}
-
-struct pid_namespace;
-
-/*
- * the helpers to get the task's different pids as they are seen
- * from various namespaces
- *
- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
- * current.
- * task_xid_nr_ns() : id seen from the ns specified;
- *
- * set_task_vxid() : assigns a virtual id to a task;
- *
- * see also pid_nr() etc in include/linux/pid.h
- */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
- struct pid_namespace *ns);
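-
-/*
- * For example, the first task of a new PID namespace is seen as pid 1
- * by task_pid_vnr() inside that namespace, while task_pid_nr() still
- * reports its global id.
- */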
-
-static inline pid_t task_pid_nr(struct task_struct *tsk)
-{
- return tsk->pid;
-}
-
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
- struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
-}
-
-static inline pid_t task_pid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
-}
-
-
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
-{
- return tsk->tgid;
-}
-
-pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return pid_vnr(task_tgid(tsk));
-}
-
-
-static inline int pid_alive(const struct task_struct *p);
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
- struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
-}
-
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
-
-
-static inline pid_t task_session_nr_ns(struct task_struct *tsk,
- struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
-}
-
-static inline pid_t task_session_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
-}
-
-/* obsolete, do not use */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
-{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
-}
-
-/**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state).
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
- return p->pids[PIDTYPE_PID].pid != NULL;
-}
-
-/**
- * is_global_init - check if a task structure is init. Since init
- * is free to have sub-threads, we need to check tgid.
- * @tsk: Task structure to be checked.
- *
- * Check if a task structure is the first user space task the kernel created.
- *
- * Return: 1 if the task structure is init. 0 otherwise.
- */
-static inline int is_global_init(struct task_struct *tsk)
-{
- return task_tgid_nr(tsk) == 1;
-}
-
-extern struct pid *cad_pid;
-
-extern void free_task(struct task_struct *tsk);
-#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-
-extern void __put_task_struct(struct task_struct *t);
-
-static inline void put_task_struct(struct task_struct *t)
-{
- if (atomic_dec_and_test(&t->usage))
- __put_task_struct(t);
-}
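-
-/*
- * Typical pairing (sketch): take a reference before using a task
- * beyond the locked or RCU section that produced it, then drop it:
- *
- *	get_task_struct(t);
- *	... use t; it cannot be freed underneath us ...
- *	put_task_struct(t);	(may free t via __put_task_struct())
- */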
-
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
-struct task_struct *try_get_task_struct(struct task_struct **ptask);
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
- u64 *utime, u64 *stime);
-extern u64 task_gtime(struct task_struct *t);
-#else
-static inline void task_cputime(struct task_struct *t,
- u64 *utime, u64 *stime)
-{
- *utime = t->utime;
- *stime = t->stime;
-}
-
-static inline u64 task_gtime(struct task_struct *t)
-{
- return t->gtime;
-}
-#endif
-
-#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-static inline void task_cputime_scaled(struct task_struct *t,
- u64 *utimescaled,
- u64 *stimescaled)
-{
- *utimescaled = t->utimescaled;
- *stimescaled = t->stimescaled;
-}
-#else
-static inline void task_cputime_scaled(struct task_struct *t,
- u64 *utimescaled,
- u64 *stimescaled)
-{
- task_cputime(t, utimescaled, stimescaled);
-}
-#endif
-
-extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
-
-/*
- * Per process flags
- */
-#define PF_IDLE 0x00000002 /* I am an IDLE thread */
-#define PF_EXITING 0x00000004 /* getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
-#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
-#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
-#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
-#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
-#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
-#define PF_DUMPCORE 0x00000200 /* dumped core */
-#define PF_SIGNALED 0x00000400 /* killed by a signal */
-#define PF_MEMALLOC 0x00000800 /* Allocating memory */
-#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
-#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
-#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* frozen for system suspend */
-#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
-#define PF_KSWAPD 0x00040000 /* I am kswapd */
-#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
-#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
-#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
-#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
-#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
-#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
-#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
-
-/*
- * Only the _current_ task can read/write to tsk->flags, but other
- * tasks can access tsk->flags in readonly mode for example
- * with tsk_used_math (like during threaded core dumping).
- * There is however an exception to this rule during ptrace
- * or during fork: the ptracer task is allowed to write to the
- * child->flags of its traced child (same goes for fork, the parent
- * can write to the child->flags), because we're guaranteed the
- * child is not running and in turn not changing child->flags
- * at the same time the parent does it.
- */
-#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
-#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
-#define clear_used_math() clear_stopped_child_used_math(current)
-#define set_used_math() set_stopped_child_used_math(current)
-#define conditional_stopped_child_used_math(condition, child) \
- do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
-#define conditional_used_math(condition) \
- conditional_stopped_child_used_math(condition, current)
-#define copy_to_stopped_child_used_math(child) \
- do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
-/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
-#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
-#define used_math() tsk_used_math(current)
-
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
- * __GFP_FS is also cleared as it implies __GFP_IO.
- */
-static inline gfp_t memalloc_noio_flags(gfp_t flags)
-{
- if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~(__GFP_IO | __GFP_FS);
- return flags;
-}
-
-static inline unsigned int memalloc_noio_save(void)
-{
- unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
- current->flags |= PF_MEMALLOC_NOIO;
- return flags;
-}
-
-static inline void memalloc_noio_restore(unsigned int flags)
-{
- current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
-}
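-
-/*
- * Usage sketch (the kmalloc() call is only a stand-in allocation):
- * bracket a region whose allocations must not recurse into IO:
- *
- *	unsigned int noio = memalloc_noio_save();
- *	p = kmalloc(size, GFP_KERNEL);	(GFP mask filtered by the page
- *					 allocator via memalloc_noio_flags())
- *	memalloc_noio_restore(noio);
- */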
-
-/* Per-process atomic flags. */
-#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
-#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
-#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
-
-
-#define TASK_PFA_TEST(name, func) \
- static inline bool task_##func(struct task_struct *p) \
- { return test_bit(PFA_##name, &p->atomic_flags); }
-#define TASK_PFA_SET(name, func) \
- static inline void task_set_##func(struct task_struct *p) \
- { set_bit(PFA_##name, &p->atomic_flags); }
-#define TASK_PFA_CLEAR(name, func) \
- static inline void task_clear_##func(struct task_struct *p) \
- { clear_bit(PFA_##name, &p->atomic_flags); }
-
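-/*
- * Each invocation below expands to a small accessor; e.g.
- * TASK_PFA_TEST(SPREAD_PAGE, spread_page) yields:
- *
- *	static inline bool task_spread_page(struct task_struct *p)
- *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
- */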
-TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
-TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
-
-TASK_PFA_TEST(SPREAD_PAGE, spread_page)
-TASK_PFA_SET(SPREAD_PAGE, spread_page)
-TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
-
-TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
-TASK_PFA_SET(SPREAD_SLAB, spread_slab)
-TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
-
-TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
-TASK_PFA_SET(LMK_WAITING, lmk_waiting)
-
-/*
- * task->jobctl flags
- */
-#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
-
-#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
-#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
-#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
-#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
-#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
-#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
-#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
-
-#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
-#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
-#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
-#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
-#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
-#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
-#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
-
-#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
-
-extern bool task_set_jobctl_pending(struct task_struct *task,
- unsigned long mask);
-extern void task_clear_jobctl_trapping(struct task_struct *task);
-extern void task_clear_jobctl_pending(struct task_struct *task,
- unsigned long mask);
-
-static inline void rcu_copy_process(struct task_struct *p)
-{
-#ifdef CONFIG_PREEMPT_RCU
- p->rcu_read_lock_nesting = 0;
- p->rcu_read_unlock_special.s = 0;
- p->rcu_blocked_node = NULL;
- INIT_LIST_HEAD(&p->rcu_node_entry);
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_TASKS_RCU
- p->rcu_tasks_holdout = false;
- INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
- p->rcu_tasks_idle_cpu = -1;
-#endif /* #ifdef CONFIG_TASKS_RCU */
-}
-
-static inline void tsk_restore_flags(struct task_struct *task,
- unsigned long orig_flags, unsigned long flags)
-{
- task->flags &= ~flags;
- task->flags |= orig_flags & flags;
-}
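-
-/*
- * Usage sketch (PF_MEMALLOC chosen as an example flag): save, set
- * temporarily, then restore just that flag:
- *
- *	unsigned long pflags = current->flags;
- *
- *	current->flags |= PF_MEMALLOC;
- *	... allocation may dip into memory reserves ...
- *	tsk_restore_flags(current, pflags, PF_MEMALLOC);
- */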
-
-extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
- const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p,
- const struct cpumask *cs_cpus_allowed);
-#ifdef CONFIG_SMP
-extern void do_set_cpus_allowed(struct task_struct *p,
- const struct cpumask *new_mask);
-
-extern int set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask);
-#else
-static inline void do_set_cpus_allowed(struct task_struct *p,
- const struct cpumask *new_mask)
-{
-}
-static inline int set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask)
-{
- if (!cpumask_test_cpu(0, new_mask))
- return -EINVAL;
- return 0;
-}
-#endif
-
-#ifdef CONFIG_NO_HZ_COMMON
-void calc_load_enter_idle(void);
-void calc_load_exit_idle(void);
-#else
-static inline void calc_load_enter_idle(void) { }
-static inline void calc_load_exit_idle(void) { }
-#endif /* CONFIG_NO_HZ_COMMON */
-
-#ifndef cpu_relax_yield
-#define cpu_relax_yield() cpu_relax()
-#endif
-
-/*
- * Do not use outside of architecture code which knows its limitations.
- *
- * sched_clock() has no promise of monotonicity or bounded drift between
- * CPUs; using it (which you should not) requires disabling IRQs.
- *
- * Please use one of the three interfaces below.
- */
-extern unsigned long long notrace sched_clock(void);
-/*
- * See the comment in kernel/sched/clock.c
- */
-extern u64 running_clock(void);
-extern u64 sched_clock_cpu(int cpu);
-
-
-extern void sched_clock_init(void);
-
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init_late(void)
-{
-}
-
-static inline void sched_clock_tick(void)
-{
-}
-
-static inline void clear_sched_clock_stable(void)
-{
-}