From cc08d5459905a4155cb77e5fe25f396b4c622b7d Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 24 Apr 2015 02:18:23 +0200
Subject: [PATCH] x86/fpu: Use 'struct fpu' in fpu__unlazy_stopped()

Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/fpu/core.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 183e69dfd4d0..e3e8585284ad 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -311,27 +311,26 @@ EXPORT_SYMBOL_GPL(fpstate_alloc_init);
  * the read-only case, it's not strictly necessary for
  * read-only access to the context.
  */
-static int fpu__unlazy_stopped(struct task_struct *child)
+static int fpu__unlazy_stopped(struct fpu *child_fpu)
 {
-	struct fpu *child_fpu = &child->thread.fpu;
 	int ret;
 
-	if (WARN_ON_ONCE(child == current))
+	if (WARN_ON_ONCE(child_fpu == &current->thread.fpu))
 		return -EINVAL;
 
 	if (child_fpu->fpstate_active) {
-		child->thread.fpu.last_cpu = -1;
+		child_fpu->last_cpu = -1;
 		return 0;
 	}
 
 	/*
 	 * Memory allocation at the first usage of the FPU and other state.
 	 */
-	ret = fpstate_alloc(&child->thread.fpu);
+	ret = fpstate_alloc(child_fpu);
 	if (ret)
 		return ret;
 
-	fpstate_init(&child->thread.fpu);
+	fpstate_init(child_fpu);
 
 	/* Safe to do for stopped child tasks: */
 	child_fpu->fpstate_active = 1;
@@ -426,12 +425,13 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		void *kbuf, void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	int ret;
 
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
@@ -445,12 +445,13 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		const void *kbuf, const void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	int ret;
 
 	if (!cpu_has_fxsr)
 		return -ENODEV;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
@@ -478,13 +479,14 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		void *kbuf, void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	struct xsave_struct *xsave;
 	int ret;
 
 	if (!cpu_has_xsave)
 		return -ENODEV;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
@@ -508,13 +510,14 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
 		unsigned int pos, unsigned int count,
 		const void *kbuf, const void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	struct xsave_struct *xsave;
 	int ret;
 
 	if (!cpu_has_xsave)
 		return -ENODEV;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
@@ -674,10 +677,11 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 	       unsigned int pos, unsigned int count,
 	       void *kbuf, void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
@@ -705,10 +709,11 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	       unsigned int pos, unsigned int count,
 	       const void *kbuf, const void __user *ubuf)
 {
+	struct fpu *fpu = &target->thread.fpu;
 	struct user_i387_ia32_struct env;
 	int ret;
 
-	ret = fpu__unlazy_stopped(target);
+	ret = fpu__unlazy_stopped(fpu);
 	if (ret)
 		return ret;
 
-- 
2.11.0