/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
 */
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

	.section .entry.text, "ax"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all_kernel
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)
/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
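/*
 * For illustration only (a sketch following stackprotector.h, not code
 * from this file): the object that %gs points at is laid out so that
 * the canary lands exactly at the offset gcc hard-codes, %gs:20.
 *
 *	struct stack_canary {
 *		char __pad[20];		// padding puts the canary at offset 20
 *		unsigned long canary;	// compiled code reads this as %gs:20
 *	};
 */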
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro POP_GS pop
	addl	$(4 + \pop), %esp
.endm

 /* all the rest are no-op */
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm
#else	/* CONFIG_X86_32_LAZY_GS */

.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */
/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm
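/*
 * A rough C model of the two CR3 macros above, assuming the x86-32 PTI
 * convention that the user copy of the page tables sits one page after
 * the kernel copy, so bit PAGE_SHIFT (bit 12) of CR3 selects it:
 *
 *	#define PTI_SWITCH_MASK (1UL << 12)	// PAGE_SHIFT
 *
 *	static unsigned long to_user_cr3(unsigned long cr3)
 *	{
 *		return cr3 | PTI_SWITCH_MASK;	// unconditionally set the bit
 *	}
 *
 *	// returns the CR3 to load; *entry reports the CR3 we entered with
 *	static unsigned long to_kernel_cr3(unsigned long cr3, unsigned long *entry)
 *	{
 *		*entry = cr3;
 *		return cr3 & ~PTI_SWITCH_MASK;	// clear the user bit
 *	}
 *
 * The assembly additionally skips the CR3 write entirely when the bit
 * is already clear, since writing CR3 flushes the TLB.
 */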
.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx

	/* Switch to kernel stack if necessary */
	.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
	.endif
.endm
/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original value of %ebp; by then SAVE_ALL has already saved it into pt_regs.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov	%esp, %ebp
	andl	$0x7fffffff, %ebp
#endif
.endm
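/*
 * A userspace-flavoured sketch of the contract with the unwinder (the
 * helper names are made up for illustration; on a 3G/1G split all
 * kernel stack addresses have bit 31 set, which is what makes a
 * cleared MSB unambiguous):
 *
 *	#include <stdint.h>
 *
 *	static uintptr_t encode_frame_pointer(void *regs)
 *	{
 *		return (uintptr_t)regs & 0x7fffffffUL;	// clear the MSB
 *	}
 *
 *	static void *decode_frame_pointer(uintptr_t bp)
 *	{
 *		if (bp & 0x80000000UL)
 *			return 0;			// a real frame pointer
 *		return (void *)(bp | 0x80000000UL);	// recover pt_regs
 *	}
 */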
.macro RESTORE_INT_REGS

.macro RESTORE_REGS pop=0
.pushsection .fixup, "ax"

.macro RESTORE_ALL_NMI pop=0
	RESTORE_REGS pop=\pop
.endm
.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)

	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS
	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16-bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that makes up for the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm
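/*
 * The arithmetic above as a C sketch (illustrative variable names, not
 * kernel code): pick a new stack pointer that carries the user's high
 * word while a matching segment base keeps the kernel stack addressable.
 *
 *	unsigned int kernel_esp = ...;	// %esp on this path
 *	unsigned int user_esp   = ...;	// PT_OLDESP(%esp)
 *
 *	unsigned int new_esp = (user_esp & 0xffff0000)	// user high word
 *			     | (kernel_esp & 0x0000ffff);
 *	unsigned int base    = kernel_esp - new_esp;	// low word is 0
 *
 *	// base + new_esp == kernel_esp: through the ESPFIX segment the
 *	// stack still aliases the kernel stack, while %esp carries the
 *	// user's high word for the iret.
 */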
/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen everywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
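/*
 * CS is only 16 bits wide, so the upper bits of the dword-sized CS slot
 * in pt_regs are free to carry state across the entry/iret pair. A C
 * sketch of the marker protocol (the helper names are hypothetical):
 *
 *	static void set_marker(unsigned int *cs_slot, unsigned int flag)
 *	{
 *		*cs_slot |= flag;			// on kernel entry
 *	}
 *
 *	static int test_and_clear_marker(unsigned int *cs_slot, unsigned int flag)
 *	{
 *		int was_set = (*cs_slot & flag) != 0;
 *		*cs_slot &= ~flag;			// must be clean for iret
 *		return was_set;
 *	}
 */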
.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@
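	/*
	 * In C terms the instructions above are a single unsigned range
	 * test (sketch only):
	 *
	 *	// on the entry stack iff
	 *	// stack_end - SIZEOF_entry_stack <= esp < stack_end
	 *	int on_entry_stack =
	 *		(unsigned int)(stack_end - esp) < SIZEOF_entry_stack;
	 *
	 * One unsigned compare covers both bounds at once.
	 */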
	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/*
	 * Clear unused upper bits of the dword containing the word-sized CS
	 * slot in pt_regs in case hardware didn't clear it for us.
	 */
	andl	$(0x0000ffff), PT_CS(%esp)

	/* Special case - entry from kernel mode via entry stack */
	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lentry_from_kernel_\@
	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx
#endif

.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack contents
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@
.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */
	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on task-stack and copy everything over
	 */
	jmp	.Lcopy_pt_regs_\@

.Lend_\@:
.endm
/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we have switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting the stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
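/*
 * The ordering contract in C-like pseudo code (names invented for this
 * sketch, not kernel identifiers):
 *
 *	memcpy(entry_frame, task_frame, nbytes);   // copy first
 *	esp = entry_frame;                         // switch afterwards
 *
 * An NMI that hits between the two steps still finds %esp on the task
 * stack, so the frame being copied cannot be overwritten under us.
 */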
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm
/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
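	/*
	 * In C terms the canary hand-off above amounts to (a sketch; the
	 * accessor is the generic kernel per-cpu API, not code from this
	 * file):
	 *
	 *	this_cpu_write(stack_canary.canary, next->stack_canary);
	 *
	 * 32-bit has no per-task %gs base for the canary, so the per-cpu
	 * copy that %gs:20 resolves to must be refreshed on every switch.
	 */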
#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)
/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
ENTRY(ret_from_exception)
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all_kernel
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all_kernel
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif
GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!). To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available. This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO. In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction. This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 */
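/*
 * For reference, the hardware side of SYSENTER behaves roughly like the
 * pseudo-C below (a sketch of the SDM description; IA32_SYSENTER_CS/
 * ESP/EIP are the architectural MSRs 0x174-0x176):
 *
 *	cs     = IA32_SYSENTER_CS;	// ring-0 code segment
 *	ss     = IA32_SYSENTER_CS + 8;	// implied, not a separate MSR
 *	esp    = IA32_SYSENTER_ESP;
 *	eip    = IA32_SYSENTER_EIP;
 *	eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_VM);
 *	// nothing is pushed: the old EIP/ESP/EFLAGS are simply gone
 */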
ENTRY(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */
	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves. To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps. (Yes, this is slow, but so is
	 * single-stepping in general. This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:
	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	popfl
	popl	%eax

	/*
	 * Return to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit
.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)
/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction. INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls. Instances of INT $0x80 can be found inline in
 * various programs and libraries. It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method. Restarted 32-bit system calls also fall back
 * to INT $0x80 regardless of what instruction was originally used to
 * do the system call. (64-bit programs can use INT $0x80 as well, but
 * they can only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path. It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax	system call number
 */
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */

	SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
	SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
	CHECK_AND_APPLY_ESPFIX

	/* Switch back to user CR3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/* Restore user state */
	RESTORE_REGS pop=4			# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

restore_all_kernel:
	TRACE_IRQS_IRET
	PARANOID_EXIT_TO_KERNEL_MODE
	RESTORE_REGS 4
	jmp	.Lirq_return

.section .fixup, "ax"
iret_exc:
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)
ENDPROC(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm
/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
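/*
 * Why the odd constant: pushing (~vector + 0x80) always fits in a
 * signed byte, so each stub's push uses the short imm8 encoding and
 * stays within its 8-byte slot; common_interrupt below subtracts 0x80
 * again. The round trip as a C sketch (illustrative only):
 *
 *	int pushed  = (signed char)(~vector + 0x80);	// in [-128, 127]
 *	int decoded = pushed - 0x80;			// == ~vector, in [-256, -1]
 *	unsigned int orig = ~decoded;			// the original vector
 */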
/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)
#define BUILD_INTERRUPT3(name, nr, fn)			\
ENTRY(name)						\
	ASM_CLAC;					\
	pushl	$~(nr);					\
	SAVE_ALL switch_stacks=1;			\
	ENCODE_FRAME_POINTER;				\
	TRACE_IRQS_OFF					\
	movl	%esp, %eax;				\
	call	fn;					\
	jmp	ret_from_intr;				\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl $do_general_protection", \
		    "pushl $do_simd_coprocessor_error", \
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/*
	 * EAX == 0 => Category 1 (Bad segment)
	 * EAX != 0 => Category 2 (Bad IRET)
	 */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
		 hv_stimer0_vector_handler)

#endif /* CONFIG_HYPERV */
ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp	common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	movl	$(__USER_DS), %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
	SWITCH_TO_KERNEL_STACK
	ENCODE_FRAME_POINTER
	cld
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception)
ENTRY(debug)
	/*
	 * Entry from sysenter is now handled in common_exception
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_debug
	jmp	common_exception
END(debug)
/*
 * NMI is doubly nasty. It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks. We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC

#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lnmi_return

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack. Switch off. No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp

.Lnmi_return:
	CHECK_AND_APPLY_ESPFIX
	RESTORE_ALL_NMI pop=4
	jmp	.Lirq_return
#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)

	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr

	pushl	%eax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_ALL_NMI
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)
ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int

	SAVE_ALL switch_stacks=1
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)
ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)
#endif
ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
END(rewind_stack_do_exit)