X-Git-Url: http://git.osdn.net/view?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=4c0f72d6988c3453fa7a6c51903e37ac46fe303b;hb=8eb3d65a5ddc6b3afaa3466e0665c4c0cd6578b1;hp=e6f50626b262b80118b10bec0a07d71cdb3df854;hpb=64b564428faa872686215f9a410558ebb1b7ccf4;p=sagit-ice-cold%2Fkernel_xiaomi_msm8998.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e6f50626b262..4c0f72d6988c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2041,7 +2041,7 @@ extern int arch_task_struct_size __read_mostly;
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
 #else
@@ -2056,7 +2056,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
 {
 }
 static inline bool should_numa_migrate_memory(struct task_struct *p,
@@ -2350,6 +2350,8 @@ static inline void memalloc_noio_restore(unsigned int flags)
 #define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
 #define PFA_SPEC_SSB_DISABLE		4	/* Speculative Store Bypass disabled */
 #define PFA_SPEC_SSB_FORCE_DISABLE	5	/* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE		6	/* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE	7	/* Indirect branch speculation permanently restricted */
 
 
 #define TASK_PFA_TEST(name, func)					\
@@ -2380,6 +2382,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
 /*
  * task->jobctl flags
  */
@@ -2788,12 +2797,17 @@ extern struct mm_struct * mm_alloc(void);
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
-static inline void mmdrop(struct mm_struct * mm)
+static inline void mmdrop(struct mm_struct *mm)
 {
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
 }
 
+static inline bool mmget_not_zero(struct mm_struct *mm)
+{
+	return atomic_inc_not_zero(&mm->mm_users);
+}
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* same as above but performs the slow path from the async context. Can
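
Note (illustrative, not part of the patch): the TASK_PFA_TEST/SET/CLEAR
macros generate per-flag accessors over task->atomic_flags, so the new
SPEC_IB instantiations above provide helpers roughly equivalent to the
sketch below; these bodies show the expected macro expansion and are not
code copied from this tree.

	/* Sketch of what TASK_PFA_TEST/SET/CLEAR(SPEC_IB_DISABLE, ...) expand to */
	static inline bool task_spec_ib_disable(struct task_struct *p)
	{
		return test_bit(PFA_SPEC_IB_DISABLE, &p->atomic_flags);
	}

	static inline void task_set_spec_ib_disable(struct task_struct *p)
	{
		set_bit(PFA_SPEC_IB_DISABLE, &p->atomic_flags);
	}

	static inline void task_clear_spec_ib_disable(struct task_struct *p)
	{
		clear_bit(PFA_SPEC_IB_DISABLE, &p->atomic_flags);
	}

As with SPEC_SSB_FORCE_DISABLE, no TASK_PFA_CLEAR() is instantiated for
SPEC_IB_FORCE_DISABLE, so once the force-disable bit is set for a task
it cannot be cleared again.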
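
Note (illustrative, not part of the patch): the new mmget_not_zero()
helper takes a reference only while mm_users is still non-zero, which
makes it the safe way to pin another task's address space. A minimal
sketch follows, assuming kernel context; inspect_task_mm() is a
hypothetical caller, while task_lock()/task_unlock() and mmput() are
existing kernel APIs.

	/* Sketch: pin another task's mm before touching its mappings */
	static int inspect_task_mm(struct task_struct *p)
	{
		struct mm_struct *mm;

		task_lock(p);		/* stabilizes p->mm against exit */
		mm = p->mm;
		if (!mm || !mmget_not_zero(mm))
			mm = NULL;	/* kernel thread, or mm being torn down */
		task_unlock(p);

		if (!mm)
			return -EINVAL;

		/* mm_users is held here, so the mappings stay alive */

		mmput(mm);		/* drop the reference taken above */
		return 0;
	}

Note that mmget_not_zero() pins mm_users (the mappings and user-space),
while mmdrop() above manages mm_count (the mm_struct and page tables);
the two counters protect different lifetimes.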