From 532ed1900d37a47c821718a0d8d28eb05b2c4d28 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 17 Aug 2020 05:47:57 +0000
Subject: [PATCH] powerpc/process: Remove useless #ifdef CONFIG_SPE

cpu_has_feature(CPU_FTR_SPE) returns false when CONFIG_SPE is not set.

There is no need to enclose the test in an #ifdef CONFIG_SPE. Remove it.

CPU_FTR_SPE only exists on 32 bits. Define it as 0 on 64 bits.

We have a couple of places like:

	#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE)) {
		do_something_that_requires_CONFIG_SPE
	} else {
		return -EINVAL;
	}
	#else
	return -EINVAL;
	#endif

Replace them with a cleaner version:

	if (cpu_has_feature(CPU_FTR_SPE)) {
	#ifdef CONFIG_SPE
		do_something_that_requires_CONFIG_SPE
	#endif
	} else {
		return -EINVAL;
	}

When CONFIG_SPE is not set, this resolves to an unconditional
return of -EINVAL.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/698df8387555765b70ea42e4a7fa48141c309c1f.1597643221.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/include/asm/cputable.h |  1 +
 arch/powerpc/kernel/process.c       | 21 +++++++--------------
 2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 32a15dc49e8c..8ca5885bd5b9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -170,6 +170,7 @@ static inline void cpu_feature_keys_init(void) { }
 #else	/* CONFIG_PPC32 */
 /* Define these to 0 for the sake of tests in common code */
 #define CPU_FTR_PPC_LE			(0)
+#define CPU_FTR_SPE			(0)
 #endif
 
 /*
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 52019b6962ba..348d4355bc00 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -413,10 +413,8 @@ static int __init init_msr_all_available(void)
 		msr_all_available |= MSR_VEC;
 	if (cpu_has_feature(CPU_FTR_VSX))
 		msr_all_available |= MSR_VSX;
-#ifdef CONFIG_SPE
 	if (cpu_has_feature(CPU_FTR_SPE))
 		msr_all_available |= MSR_SPE;
-#endif
 
 	return 0;
 }
@@ -446,10 +444,8 @@ void giveup_all(struct task_struct *tsk)
 #endif
 	if (usermsr & MSR_VEC)
 		__giveup_altivec(tsk);
-#ifdef CONFIG_SPE
 	if (usermsr & MSR_SPE)
 		__giveup_spe(tsk);
-#endif
 
 	msr_check_and_clear(msr_all_available);
 }
@@ -1899,7 +1895,6 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 	 * fpexc_mode. fpexc_mode is also used for setting FP exception
 	 * mode (asyn, precise, disabled) for 'Classic' FP. */
 	if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
 		if (cpu_has_feature(CPU_FTR_SPE)) {
 			/*
 			 * When the sticky exception bits are set
@@ -1913,16 +1908,15 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
 			 * anyway to restore the prctl settings from
 			 * the saved environment.
 			 */
+#ifdef CONFIG_SPE
 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
 			tsk->thread.fpexc_mode = val &
 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#endif
 			return 0;
 		} else {
 			return -EINVAL;
 		}
-#else
-		return -EINVAL;
-#endif
 	}
 
 	/* on a CONFIG_SPE this does not hurt us. The bits that
@@ -1943,8 +1937,7 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 {
 	unsigned int val;
 
-	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
+	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
 		if (cpu_has_feature(CPU_FTR_SPE)) {
 			/*
 			 * When the sticky exception bits are set
@@ -1958,15 +1951,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
 			 * anyway to restore the prctl settings from
 			 * the saved environment.
 			 */
+#ifdef CONFIG_SPE
 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
 			val = tsk->thread.fpexc_mode;
+#endif
 		} else
 			return -EINVAL;
-#else
-		return -EINVAL;
-#endif
-	else
+	} else {
 		val = __unpack_fe01(tsk->thread.fpexc_mode);
+	}
 
 	return put_user(val, (unsigned int __user *) adr);
 }
-- 
2.11.0
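Editor's note, not part of the patch: a minimal standalone sketch of the idea the commit message describes, assuming only ordinary userspace C. The names has_feature, FTR_SPE and set_mode are made-up stand-ins for cpu_has_feature(), CPU_FTR_SPE and the prctl helpers, and has_feature() is simplified to a plain non-zero test rather than the kernel's feature-mask lookup. The point illustrated is just that once the feature constant is defined to 0, the guarded branch is provably dead and the else path (return -EINVAL) is what remains, so no #ifdef is needed around the test itself.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for CPU_FTR_SPE: defined to 0 when the feature cannot exist. */
	#define FTR_SPE 0UL

	/*
	 * Simplified stand-in for cpu_has_feature(): the real thing tests a CPU
	 * feature mask, but the property used here is only that a 0 constant
	 * makes the call compile-time false.
	 */
	static bool has_feature(unsigned long feature)
	{
		return feature != 0;
	}

	/* Shaped like the cleaned-up set_fpexc_mode()/get_fpexc_mode() paths. */
	static int set_mode(void)
	{
		if (has_feature(FTR_SPE)) {
			/*
			 * Feature-specific work would sit here, still under its
			 * own #ifdef if it uses symbols that only exist when the
			 * feature is configured; with FTR_SPE == 0 the compiler
			 * can discard this branch entirely.
			 */
			return 0;
		} else {
			return -EINVAL;
		}
	}

	int main(void)
	{
		/* With FTR_SPE defined as 0 this always prints -22 (-EINVAL). */
		printf("set_mode() = %d\n", set_mode());
		return 0;
	}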