OSDN Git Service

Merge tag 'v3.16-rc1' into x86/cpufeature
author: H. Peter Anvin <hpa@linux.intel.com>
Wed, 18 Jun 2014 22:26:19 +0000 (15:26 -0700)
committer: H. Peter Anvin <hpa@linux.intel.com>
Wed, 18 Jun 2014 22:26:19 +0000 (15:26 -0700)
Linux 3.16-rc1

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
1  2 
arch/x86/kernel/cpu/common.c

@@@ -8,6 -8,7 +8,7 @@@
  #include <linux/delay.h>
  #include <linux/sched.h>
  #include <linux/init.h>
+ #include <linux/kprobes.h>
  #include <linux/kgdb.h>
  #include <linux/smp.h>
  #include <linux/io.h>
@@@ -20,6 -21,7 +21,7 @@@
  #include <asm/processor.h>
  #include <asm/debugreg.h>
  #include <asm/sections.h>
+ #include <asm/vsyscall.h>
  #include <linux/topology.h>
  #include <linux/cpumask.h>
  #include <asm/pgtable.h>
@@@ -632,15 -634,6 +634,15 @@@ void get_cpu_cap(struct cpuinfo_x86 *c
                c->x86_capability[9] = ebx;
        }
  
 +      /* Extended state features: level 0x0000000d */
 +      if (c->cpuid_level >= 0x0000000d) {
 +              u32 eax, ebx, ecx, edx;
 +
 +              cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
 +
 +              c->x86_capability[10] = eax;
 +      }
 +
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
@@@ -962,6 -955,38 +964,38 @@@ static void vgetcpu_set_mode(void
        else
                vgetcpu_mode = VGETCPU_LSL;
  }
+ /* May not be __init: called during resume */
+ static void syscall32_cpu_init(void)
+ {
+       /* Load these always in case some future AMD CPU supports
+          SYSENTER from compat mode too. */
+       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+       wrmsrl(MSR_CSTAR, ia32_cstar_target);
+ }
+ #endif
+ #ifdef CONFIG_X86_32
+ void enable_sep_cpu(void)
+ {
+       int cpu = get_cpu();
+       struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       if (!boot_cpu_has(X86_FEATURE_SEP)) {
+               put_cpu();
+               return;
+       }
+       tss->x86_tss.ss1 = __KERNEL_CS;
+       tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+       wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+       wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+       put_cpu();
+ }
  #endif
  
  void __init identify_boot_cpu(void)
@@@ -1169,6 -1194,7 +1203,7 @@@ int is_debug_stack(unsigned long addr
                (addr <= __get_cpu_var(debug_stack_addr) &&
                 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
  }
+ NOKPROBE_SYMBOL(is_debug_stack);
  
  DEFINE_PER_CPU(u32, debug_idt_ctr);
  
@@@ -1177,6 -1203,7 +1212,7 @@@ void debug_stack_set_zero(void
        this_cpu_inc(debug_idt_ctr);
        load_current_idt();
  }
+ NOKPROBE_SYMBOL(debug_stack_set_zero);
  
  void debug_stack_reset(void)
  {
        if (this_cpu_dec_return(debug_idt_ctr) == 0)
                load_current_idt();
  }
+ NOKPROBE_SYMBOL(debug_stack_reset);
  
  #else /* CONFIG_X86_64 */