From 3a0aee4801d475b64a408539c01ec0d17d52192b Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 22 Apr 2015 13:16:47 +0200
Subject: [PATCH] x86/fpu: Rename math_state_restore() to fpu__restore()

Move to the new fpu__*() namespace.

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 Documentation/preempt-locking.txt | 2 +-
 arch/x86/include/asm/i387.h       | 2 +-
 arch/x86/kernel/fpu/core.c        | 6 +++---
 arch/x86/kernel/fpu/xsave.c       | 2 +-
 arch/x86/kernel/process_32.c      | 2 +-
 arch/x86/kernel/process_64.c      | 2 +-
 arch/x86/kernel/traps.c           | 2 +-
 drivers/lguest/x86/core.c         | 4 ++--
 8 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/Documentation/preempt-locking.txt b/Documentation/preempt-locking.txt
index 57883ca2498b..e89ce6624af2 100644
--- a/Documentation/preempt-locking.txt
+++ b/Documentation/preempt-locking.txt
@@ -48,7 +48,7 @@ preemption must be disabled around such regions.
 
 Note, some FPU functions are already explicitly preempt safe. For example,
 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, math_state_restore must be called with preemption disabled.
+However, fpu__restore() must be called with preemption disabled.
 
 
 RULE #3: Lock acquire and release must be performed by same task
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index d6fc84440b73..c8ee395dd6c6 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -23,7 +23,7 @@ extern void fpstate_init(struct fpu *fpu);
 extern void fpu__flush_thread(struct task_struct *tsk);
 
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
+extern void fpu__restore(void);
 
 extern bool irq_fpu_usable(void);
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 7add2fb7369e..15c3cf7bd160 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -228,7 +228,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 }
 
 /*
- * 'math_state_restore()' saves the current math information in the
+ * 'fpu__restore()' saves the current math information in the
  * old math state array, and gets the new ones from the current task
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
@@ -237,7 +237,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
  * Must be called with kernel preemption disabled (eg with local
  * local interrupts as in the case of do_device_not_available).
  */
-void math_state_restore(void)
+void fpu__restore(void)
 {
 	struct task_struct *tsk = current;
 
@@ -267,7 +267,7 @@ void math_state_restore(void)
 	}
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL_GPL(math_state_restore);
+EXPORT_SYMBOL_GPL(fpu__restore);
 
 void fpu__flush_thread(struct task_struct *tsk)
 {
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 163b5cc582ef..d913d5024901 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -404,7 +404,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		set_used_math();
 		if (use_eager_fpu()) {
 			preempt_disable();
-			math_state_restore();
+			fpu__restore();
 			preempt_enable();
 		}
 
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 84d647d4b14d..1a0edce626b2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
 	 * the GDT and LDT are properly updated, and must be
-	 * done before math_state_restore, so the TS bit is up
+	 * done before fpu__restore(), so the TS bit is up
 	 * to date.
 	 */
 	arch_end_context_switch(next_p);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ae6efeccb46e..99cc4b8589ad 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Leave lazy mode, flushing any hypercalls made here. This
 	 * must be done after loading TLS entries in the GDT but before
 	 * loading segments that might reference them, and and it must
-	 * be done before math_state_restore, so the TS bit is up to
+	 * be done before fpu__restore(), so the TS bit is up to
 	 * date.
 	 */
 	arch_end_context_switch(next_p);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 63c7fc3677b4..22ad90a40dbf 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -846,7 +846,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 		return;
 	}
 #endif
-	math_state_restore(); /* interrupts still off */
+	fpu__restore(); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 30f2aef69d78..bcb534a5512d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
 	/*
 	 * Similarly, if we took a trap because the Guest used the FPU,
 	 * we have to restore the FPU it expects to see.
-	 * math_state_restore() may sleep and we may even move off to
+	 * fpu__restore() may sleep and we may even move off to
 	 * a different CPU. So all the critical stuff should be done
 	 * before this.
 	 */
	else if (cpu->regs->trapnum == 7 && !user_has_fpu())
-		math_state_restore();
+		fpu__restore();
 }
 
 /*H:130
-- 
2.11.0
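For reference, the calling convention this patch documents — fpu__restore() must
run with kernel preemption disabled — looks like this from a caller's side. A
minimal sketch only, mirroring the use_eager_fpu() path in the
__restore_xstate_sig() hunk above; the helper name is hypothetical and not part
of the patch:

	#include <linux/preempt.h>
	#include <asm/i387.h>	/* declares fpu__restore() after this patch */

	/* Hypothetical example: restore the FPU state of 'current'. */
	static void example_restore_current_fpu(void)
	{
		preempt_disable();	/* fpu__restore() must not be preempted */
		fpu__restore();		/* load current's FPU state onto the CPU */
		preempt_enable();
	}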