
x86/fault: Make error_code sanitization more robust
author     Andy Lutomirski <luto@kernel.org>
           Mon, 19 Nov 2018 22:45:31 +0000 (14:45 -0800)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 20 Nov 2018 07:44:29 +0000 (08:44 +0100)
The error code in a page fault on a kernel address indicates
whether that address is mapped, which should not be revealed in a signal.

The normal code path for a page fault on a kernel address sanitizes the bit,
but the paths for vsyscall emulation and SIGBUS do not.  Both are
harmless, but for subtle reasons.  SIGBUS is never sent for a kernel
address, and vsyscall emulation will never fault on a kernel address
per se because it will fail an access_ok() check instead.

Make the code more robust by adding a helper that sets the relevant
fields and sanitizes the error code in one place.  This also
cleans up the code -- we had three copies of roughly the same thing.
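
For illustration only (this example is not part of the patch): on x86-64,
tsk->thread.error_code is what later ends up in the sigcontext 'err' slot,
so a user-space SIGSEGV handler can read the page-fault error code via
uc_mcontext.gregs[REG_ERR].  A minimal sketch, assuming glibc and an
arbitrarily chosen kernel-half example address, of how the sanitized value
looks from user space:

  #define _GNU_SOURCE
  #include <signal.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <ucontext.h>

  static void segv_handler(int sig, siginfo_t *info, void *ctx)
  {
          ucontext_t *uc = ctx;
          unsigned long err = uc->uc_mcontext.gregs[REG_ERR];

          /*
           * Bit 0 is the protection bit (X86_PF_PROT).  With the error
           * code sanitized, it is always set for a fault on a kernel
           * address, so user space cannot tell whether that address
           * was actually mapped.
           */
          printf("fault at %p, error code 0x%lx\n", info->si_addr, err);
          exit(0);
  }

  int main(void)
  {
          struct sigaction sa = { 0 };

          sa.sa_sigaction = segv_handler;
          sa.sa_flags = SA_SIGINFO;
          sigemptyset(&sa.sa_mask);
          sigaction(SIGSEGV, &sa, NULL);

          /* Example kernel address; anything >= TASK_SIZE_MAX behaves the same. */
          *(volatile char *)0xffffffff81000000UL = 1;
          return 0;
  }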

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/b31159bd55bd0c4fa061a20dfd6c429c094bebaa.1542667307.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/mm/fault.c

index 3c9aed0..b5ec1ca 100644
@@ -631,6 +631,24 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
        oops_end(flags, regs, sig);
 }
 
+static void set_signal_archinfo(unsigned long address,
+                               unsigned long error_code)
+{
+       struct task_struct *tsk = current;
+
+       /*
+        * To avoid leaking information about the kernel page
+        * table layout, pretend that user-mode accesses to
+        * kernel addresses are always protection faults.
+        */
+       if (address >= TASK_SIZE_MAX)
+               error_code |= X86_PF_PROT;
+
+       tsk->thread.trap_nr = X86_TRAP_PF;
+       tsk->thread.error_code = error_code | X86_PF_USER;
+       tsk->thread.cr2 = address;
+}
+
 static noinline void
 no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int signal, int si_code)
@@ -656,9 +674,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
                 * faulting through the emulate_vsyscall() logic.
                 */
                if (current->thread.sig_on_uaccess_err && signal) {
-                       tsk->thread.trap_nr = X86_TRAP_PF;
-                       tsk->thread.error_code = error_code | X86_PF_USER;
-                       tsk->thread.cr2 = address;
+                       set_signal_archinfo(address, error_code);
 
                        /* XXX: hwpoison faults will set the wrong code. */
                        force_sig_fault(signal, si_code, (void __user *)address,
@@ -821,9 +837,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);
 
-               tsk->thread.cr2         = address;
-               tsk->thread.error_code  = error_code;
-               tsk->thread.trap_nr     = X86_TRAP_PF;
+               set_signal_archinfo(address, error_code);
 
                if (si_code == SEGV_PKUERR)
                        force_sig_pkuerr((void __user *)address, pkey);
@@ -937,9 +951,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        if (is_prefetch(regs, error_code, address))
                return;
 
-       tsk->thread.cr2         = address;
-       tsk->thread.error_code  = error_code;
-       tsk->thread.trap_nr     = X86_TRAP_PF;
+       set_signal_archinfo(address, error_code);
 
 #ifdef CONFIG_MEMORY_FAILURE
        if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {