path: root/arch/i386/kernel/entry.S
author	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-07 08:59:11 -0800
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-07 08:59:11 -0800
commit	4522d58275f124105819723e24e912c8e5bf3cdd (patch)
tree	b92c29014fadffe049c1925676037f0092b8d112 /arch/i386/kernel/entry.S
parent	6cf24f031bc97cb5a7c9df3b6e73c45b628b2b28 (diff)
parent	64a26a731235b59c9d73bbe82c1f896d57400d37 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (156 commits)
  [PATCH] x86-64: Export smp_call_function_single
  [PATCH] i386: Clean up smp_tune_scheduling()
  [PATCH] unwinder: move .eh_frame to RODATA
  [PATCH] unwinder: fully support linker generated .eh_frame_hdr section
  [PATCH] x86-64: don't use set_irq_regs()
  [PATCH] x86-64: check vector in setup_ioapic_dest to verify if need setup_IO_APIC_irq
  [PATCH] x86-64: Make ix86 default to HIGHMEM4G instead of NOHIGHMEM
  [PATCH] i386: replace kmalloc+memset with kzalloc
  [PATCH] x86-64: remove remaining pc98 code
  [PATCH] x86-64: remove unused variable
  [PATCH] x86-64: Fix constraints in atomic_add_return()
  [PATCH] x86-64: fix asm constraints in i386 atomic_add_return
  [PATCH] x86-64: Correct documentation for bzImage protocol v2.05
  [PATCH] x86-64: replace kmalloc+memset with kzalloc in MTRR code
  [PATCH] x86-64: Fix numaq build error
  [PATCH] x86-64: include/asm-x86_64/cpufeature.h isn't a userspace header
  [PATCH] unwinder: Add debugging output to the Dwarf2 unwinder
  [PATCH] x86-64: Clarify error message in GART code
  [PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
  [PATCH] x86-64: Remove unwind stack pointer alignment forcing again
  ...

Fixed conflict in include/linux/uaccess.h manually

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--	arch/i386/kernel/entry.S	331
1 file changed, 184 insertions(+), 147 deletions(-)
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 5a63d6fdb70..de34b7fed3c 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -30,12 +30,13 @@
* 18(%esp) - %eax
* 1C(%esp) - %ds
* 20(%esp) - %es
- * 24(%esp) - orig_eax
- * 28(%esp) - %eip
- * 2C(%esp) - %cs
- * 30(%esp) - %eflags
- * 34(%esp) - %oldesp
- * 38(%esp) - %oldss
+ * 24(%esp) - %gs
+ * 28(%esp) - orig_eax
+ * 2C(%esp) - %eip
+ * 30(%esp) - %cs
+ * 34(%esp) - %eflags
+ * 38(%esp) - %oldesp
+ * 3C(%esp) - %oldss
*
* "current" is in register %ebx during any slow entries.
*/
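
For reference, the %gs slot added at 24(%esp) corresponds to struct pt_regs growing a segment field between %es and orig_eax. A minimal sketch of the resulting layout, assuming the conventional i386 field names; the PT_* offsets used throughout the new code below are generated from this structure via asm-offsets rather than hard-coded in this file:

	struct pt_regs {
		long ebx;		/* 0x00  PT_EBX      */
		long ecx;		/* 0x04  PT_ECX      */
		long edx;		/* 0x08  PT_EDX      */
		long esi;		/* 0x0C  PT_ESI      */
		long edi;		/* 0x10  PT_EDI      */
		long ebp;		/* 0x14  PT_EBP      */
		long eax;		/* 0x18  PT_EAX      */
		int  xds;		/* 0x1C  PT_DS       */
		int  xes;		/* 0x20  PT_ES       */
		int  xgs;		/* 0x24  PT_GS (new) */
		long orig_eax;		/* 0x28  PT_ORIG_EAX */
		long eip;		/* 0x2C  PT_EIP      */
		int  xcs;		/* 0x30  PT_CS       */
		long eflags;		/* 0x34  PT_EFLAGS   */
		long esp;		/* 0x38  PT_OLDESP   */
		int  xss;		/* 0x3C  PT_OLDSS    */
	};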
@@ -48,26 +49,24 @@
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
+#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"
-#define nr_syscalls ((syscall_table_size)/4)
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization. The following will never clobber any registers:
+ * INTERRUPT_RETURN (aka. "iret")
+ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
-EBX = 0x00
-ECX = 0x04
-EDX = 0x08
-ESI = 0x0C
-EDI = 0x10
-EBP = 0x14
-EAX = 0x18
-DS = 0x1C
-ES = 0x20
-ORIG_EAX = 0x24
-EIP = 0x28
-CS = 0x2C
-EFLAGS = 0x30
-OLDESP = 0x34
-OLDSS = 0x38
+#define nr_syscalls ((syscall_table_size)/4)
CF_MASK = 0x00000001
TF_MASK = 0x00000100
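
The open-coded DISABLE_INTERRUPTS/ENABLE_INTERRUPTS/INTERRUPT_RETURN #defines removed above move into a paravirt-aware header and, as the new comment describes, take a clobber-list argument. A rough sketch of what the native (CONFIG_PARAVIRT=n) fallbacks could look like — the header itself is not part of this hunk, so the CLBR_* values here are purely illustrative:

	#ifndef CONFIG_PARAVIRT
	#define CLBR_NONE			0
	#define CLBR_EAX			1
	#define CLBR_ECX			2
	#define CLBR_EDX			4
	#define CLBR_ANY			0xf
	/* natively the clobber list is ignored; it only matters for paravirt patching */
	#define DISABLE_INTERRUPTS(clobbers)	cli
	#define ENABLE_INTERRUPTS(clobbers)	sti
	#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
	#define INTERRUPT_RETURN		iret
	#define GET_CR0_INTO_EAX		movl %cr0, %eax
	#endif

With CONFIG_PARAVIRT enabled the same macros expand to patchable indirect calls; the native back-ends for iret and sysexit appear near the end of this diff as native_iret and native_irq_enable_sysexit.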
@@ -76,23 +75,16 @@ DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000
-/* These are replaces for paravirtualization */
-#define DISABLE_INTERRUPTS cli
-#define ENABLE_INTERRUPTS sti
-#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
-#define INTERRUPT_RETURN iret
-#define GET_CR0_INTO_EAX movl %cr0, %eax
-
#ifdef CONFIG_PREEMPT
-#define preempt_stop DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
-#define preempt_stop
+#define preempt_stop(clobbers)
#define resume_kernel restore_nocheck
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
- testl $IF_MASK,EFLAGS(%esp) # interrupts off?
+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
jz 1f
TRACE_IRQS_ON
1:
@@ -107,6 +99,9 @@ VM_MASK = 0x00020000
#define SAVE_ALL \
cld; \
+ pushl %gs; \
+ CFI_ADJUST_CFA_OFFSET 4;\
+ /*CFI_REL_OFFSET gs, 0;*/\
pushl %es; \
CFI_ADJUST_CFA_OFFSET 4;\
/*CFI_REL_OFFSET es, 0;*/\
@@ -136,7 +131,9 @@ VM_MASK = 0x00020000
CFI_REL_OFFSET ebx, 0;\
movl $(__USER_DS), %edx; \
movl %edx, %ds; \
- movl %edx, %es;
+ movl %edx, %es; \
+ movl $(__KERNEL_PDA), %edx; \
+ movl %edx, %gs
#define RESTORE_INT_REGS \
popl %ebx; \
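
SAVE_ALL now also pushes %gs and reloads it with __KERNEL_PDA, the segment through which the per-cpu PDA is reached; the only PDA field this file touches is the CPU number, via %gs:PDA_cpu in FIXUP_ESPFIX_STACK further down. As a hypothetical sketch of the structure behind that segment — field names other than the CPU number are assumptions for illustration:

	/* Reached via the __KERNEL_PDA selector loaded into %gs above. */
	struct i386_pda {
		struct i386_pda *_pda;		/* self pointer (assumed) */
		int cpu_number;			/* %gs:PDA_cpu */
		struct task_struct *pcurrent;	/* current task (assumed) */
	};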
@@ -169,17 +166,22 @@ VM_MASK = 0x00020000
2: popl %es; \
CFI_ADJUST_CFA_OFFSET -4;\
/*CFI_RESTORE es;*/\
-.section .fixup,"ax"; \
-3: movl $0,(%esp); \
- jmp 1b; \
+3: popl %gs; \
+ CFI_ADJUST_CFA_OFFSET -4;\
+ /*CFI_RESTORE gs;*/\
+.pushsection .fixup,"ax"; \
4: movl $0,(%esp); \
+ jmp 1b; \
+5: movl $0,(%esp); \
jmp 2b; \
-.previous; \
+6: movl $0,(%esp); \
+ jmp 3b; \
.section __ex_table,"a";\
.align 4; \
- .long 1b,3b; \
- .long 2b,4b; \
-.previous
+ .long 1b,4b; \
+ .long 2b,5b; \
+ .long 3b,6b; \
+.popsection
#define RING0_INT_FRAME \
CFI_STARTPROC simple;\
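
RESTORE_REGS above grows a third fixup: popping %gs can fault just like %ds/%es if the saved selector is bogus, so each popl gets an entry pairing its address with a fixup that writes 0 over the bad selector on the stack and retries the pop. The .long 1b,4b style pairs land in __ex_table, whose entries on i386 are simply:

	/* One __ex_table entry per potentially faulting instruction:
	 * if 'insn' faults, the trap handler resumes at 'fixup'. */
	struct exception_table_entry {
		unsigned long insn;	/* address of the faulting instruction */
		unsigned long fixup;	/* address to continue at instead */
	};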
@@ -198,18 +200,18 @@ VM_MASK = 0x00020000
#define RING0_PTREGS_FRAME \
CFI_STARTPROC simple;\
CFI_SIGNAL_FRAME;\
- CFI_DEF_CFA esp, OLDESP-EBX;\
- /*CFI_OFFSET cs, CS-OLDESP;*/\
- CFI_OFFSET eip, EIP-OLDESP;\
- /*CFI_OFFSET es, ES-OLDESP;*/\
- /*CFI_OFFSET ds, DS-OLDESP;*/\
- CFI_OFFSET eax, EAX-OLDESP;\
- CFI_OFFSET ebp, EBP-OLDESP;\
- CFI_OFFSET edi, EDI-OLDESP;\
- CFI_OFFSET esi, ESI-OLDESP;\
- CFI_OFFSET edx, EDX-OLDESP;\
- CFI_OFFSET ecx, ECX-OLDESP;\
- CFI_OFFSET ebx, EBX-OLDESP
+ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
+ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
+ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
+ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
+ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
+ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
+ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
+ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
+ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
+ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
+ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
ENTRY(ret_from_fork)
CFI_STARTPROC
@@ -237,17 +239,18 @@ ENTRY(ret_from_fork)
ALIGN
RING0_PTREGS_FRAME
ret_from_exception:
- preempt_stop
+ preempt_stop(CLBR_ANY)
ret_from_intr:
GET_THREAD_INFO(%ebp)
check_userspace:
- movl EFLAGS(%esp), %eax # mix EFLAGS and CS
- movb CS(%esp), %al
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
+
ENTRY(resume_userspace)
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
movl TI_flags(%ebp), %ecx
@@ -258,14 +261,14 @@ ENTRY(resume_userspace)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
- DISABLE_INTERRUPTS
+ DISABLE_INTERRUPTS(CLBR_ANY)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_nocheck
need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
- testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
+ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
jmp need_resched
@@ -287,7 +290,7 @@ sysenter_past_esp:
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
*/
- ENABLE_INTERRUPTS
+ ENABLE_INTERRUPTS(CLBR_NONE)
pushl $(__USER_DS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ss, 0*/
@@ -331,20 +334,27 @@ sysenter_past_esp:
cmpl $(nr_syscalls), %eax
jae syscall_badsys
call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp)
- DISABLE_INTERRUPTS
+ movl %eax,PT_EAX(%esp)
+ DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testw $_TIF_ALLWORK_MASK, %cx
jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
- movl EIP(%esp), %edx
- movl OLDESP(%esp), %ecx
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
xorl %ebp,%ebp
TRACE_IRQS_ON
+1: mov PT_GS(%esp), %gs
ENABLE_INTERRUPTS_SYSEXIT
CFI_ENDPROC
-
+.pushsection .fixup,"ax"
+2: movl $0,PT_GS(%esp)
+ jmp 1b
+.section __ex_table,"a"
+ .align 4
+ .long 1b,2b
+.popsection
# system call handler stub
ENTRY(system_call)
@@ -353,7 +363,7 @@ ENTRY(system_call)
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
- testl $TF_MASK,EFLAGS(%esp)
+ testl $TF_MASK,PT_EFLAGS(%esp)
jz no_singlestep
orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
@@ -365,9 +375,9 @@ no_singlestep:
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
- movl %eax,EAX(%esp) # store the return value
+ movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
@@ -376,12 +386,12 @@ syscall_exit:
jne syscall_exit_work
restore_all:
- movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
- # Warning: OLDSS(%esp) contains the wrong/random values if we
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
+ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
# See comments in process.c:copy_thread() for details.
- movb OLDSS(%esp), %ah
- movb CS(%esp), %al
+ movb PT_OLDSS(%esp), %ah
+ movb PT_CS(%esp), %al
andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
@@ -390,13 +400,13 @@ restore_nocheck:
TRACE_IRQS_IRET
restore_nocheck_notrace:
RESTORE_REGS
- addl $4, %esp
+ addl $4, %esp # skip orig_eax/error_code
CFI_ADJUST_CFA_OFFSET -4
1: INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
TRACE_IRQS_ON
- ENABLE_INTERRUPTS
+ ENABLE_INTERRUPTS(CLBR_NONE)
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
@@ -408,33 +418,42 @@ iret_exc:
CFI_RESTORE_STATE
ldt_ss:
- larl OLDSS(%esp), %eax
+ larl PT_OLDSS(%esp), %eax
jnz restore_nocheck
testl $0x00400000, %eax # returning to 32bit stack?
jnz restore_nocheck # allright, normal return
+
+#ifdef CONFIG_PARAVIRT
+ /*
+ * The kernel can't run on a non-flat stack if paravirt mode
+ * is active. Rather than try to fixup the high bits of
+ * ESP, bypass this code entirely. This may break DOSemu
+ * and/or Wine support in a paravirt VM, although the option
+ * is still available to implement the setting of the high
+ * 16-bits in the INTERRUPT_RETURN paravirt-op.
+ */
+ cmpl $0, paravirt_ops+PARAVIRT_enabled
+ jne restore_nocheck
+#endif
+
/* If returning to userspace with 16bit stack,
* try to fix the higher word of ESP, as the CPU
* won't restore it.
* This is an "official" bug of all the x86-compatible
* CPUs, which we can try to work around to make
* dosemu and wine happy. */
- subl $8, %esp # reserve space for switch16 pointer
- CFI_ADJUST_CFA_OFFSET 8
- DISABLE_INTERRUPTS
+ movl PT_OLDESP(%esp), %eax
+ movl %esp, %edx
+ call patch_espfix_desc
+ pushl $__ESPFIX_SS
+ CFI_ADJUST_CFA_OFFSET 4
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ DISABLE_INTERRUPTS(CLBR_EAX)
TRACE_IRQS_OFF
- movl %esp, %eax
- /* Set up the 16bit stack frame with switch32 pointer on top,
- * and a switch16 pointer on top of the current frame. */
- call setup_x86_bogus_stack
- CFI_ADJUST_CFA_OFFSET -8 # frame has moved
- TRACE_IRQS_IRET
- RESTORE_REGS
- lss 20+4(%esp), %esp # switch to 16bit stack
-1: INTERRUPT_RETURN
-.section __ex_table,"a"
- .align 4
- .long 1b,iret_exc
-.previous
+ lss (%esp), %esp
+ CFI_ADJUST_CFA_OFFSET -8
+ jmp restore_nocheck
CFI_ENDPROC
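
The rewritten ldt_ss path drops the old "copy the frame to a 16-bit stack" trick. Instead, patch_espfix_desc adjusts the base of the per-cpu __ESPFIX_SS GDT entry so that the truncated 16-bit stack pointer plus that base still lands on the kernel iret frame, and the lss switches onto that segment before falling through to the normal restore_nocheck/iret. FIXUP_ESPFIX_STACK further down performs the inverse; a sketch of the arithmetic (not of the actual helpers):

	/* Mirrors GET_DESC_BASE(...) + "addl %esp, %eax" in FIXUP_ESPFIX_STACK:
	 * while running with %ss == __ESPFIX_SS, descriptor base + offset
	 * reconstructs the linear (kernel) stack address. */
	static unsigned long espfix_linear_esp(unsigned long espfix_base,
					       unsigned long esp_on_espfix)
	{
		return espfix_base + esp_on_espfix;
	}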
# perform work that needs to be done immediately before resumption
@@ -445,7 +464,7 @@ work_pending:
jz work_notifysig
work_resched:
call schedule
- DISABLE_INTERRUPTS # make sure we don't miss an interrupt
+ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
@@ -458,7 +477,8 @@ work_resched:
work_notifysig: # deal with pending signals and
# notify-resume requests
- testl $VM_MASK, EFLAGS(%esp)
+#ifdef CONFIG_VM86
+ testl $VM_MASK, PT_EFLAGS(%esp)
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
@@ -468,29 +488,30 @@ work_notifysig: # deal with pending signals and
ALIGN
work_notifysig_v86:
-#ifdef CONFIG_VM86
pushl %ecx # save ti_flags for do_notify_resume
CFI_ADJUST_CFA_OFFSET 4
call save_v86_state # %eax contains pt_regs pointer
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
movl %eax, %esp
+#else
+ movl %esp, %eax
+#endif
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace_sig
-#endif
# perform syscall exit tracing
ALIGN
syscall_trace_entry:
- movl $-ENOSYS,EAX(%esp)
+ movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
xorl %edx,%edx
call do_syscall_trace
cmpl $0, %eax
jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
# so must skip actual syscall
- movl ORIG_EAX(%esp), %eax
+ movl PT_ORIG_EAX(%esp), %eax
cmpl $(nr_syscalls), %eax
jnae syscall_call
jmp syscall_exit
@@ -501,7 +522,7 @@ syscall_exit_work:
testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
jz work_pending
TRACE_IRQS_ON
- ENABLE_INTERRUPTS # could let do_syscall_trace() call
+ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
# schedule() instead
movl %esp, %eax
movl $1, %edx
@@ -515,39 +536,38 @@ syscall_fault:
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
GET_THREAD_INFO(%ebp)
- movl $-EFAULT,EAX(%esp)
+ movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
syscall_badsys:
- movl $-ENOSYS,EAX(%esp)
+ movl $-ENOSYS,PT_EAX(%esp)
jmp resume_userspace
CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
- movl %esp, %eax; \
- /* switch to 32bit stack using the pointer on top of 16bit stack */ \
- lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
- /* copy data from 16bit stack to 32bit stack */ \
- call fixup_x86_bogus_stack; \
- /* put ESP to the proper location */ \
- movl %eax, %esp;
-#define UNWIND_ESPFIX_STACK \
+ /* since we are on a wrong stack, we cant make it a C code :( */ \
+ movl %gs:PDA_cpu, %ebx; \
+ PER_CPU(cpu_gdt_descr, %ebx); \
+ movl GDS_address(%ebx), %ebx; \
+ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+ addl %esp, %eax; \
+ pushl $__KERNEL_DS; \
+ CFI_ADJUST_CFA_OFFSET 4; \
pushl %eax; \
CFI_ADJUST_CFA_OFFSET 4; \
+ lss (%esp), %esp; \
+ CFI_ADJUST_CFA_OFFSET -8;
+#define UNWIND_ESPFIX_STACK \
movl %ss, %eax; \
- /* see if on 16bit stack */ \
+ /* see if on espfix stack */ \
cmpw $__ESPFIX_SS, %ax; \
- je 28f; \
-27: popl %eax; \
- CFI_ADJUST_CFA_OFFSET -4; \
-.section .fixup,"ax"; \
-28: movl $__KERNEL_DS, %eax; \
+ jne 27f; \
+ movl $__KERNEL_DS, %eax; \
movl %eax, %ds; \
movl %eax, %es; \
- /* switch to 32bit stack */ \
+ /* switch to normal stack */ \
FIXUP_ESPFIX_STACK; \
- jmp 27b; \
-.previous
+27:;
/*
* Build the entry stubs and pointer table with
@@ -608,13 +628,16 @@ KPROBE_ENTRY(page_fault)
CFI_ADJUST_CFA_OFFSET 4
ALIGN
error_code:
+ /* the function address is in %gs's slot on the stack */
+ pushl %es
+ CFI_ADJUST_CFA_OFFSET 4
+ /*CFI_REL_OFFSET es, 0*/
pushl %ds
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET ds, 0*/
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eax, 0
- xorl %eax, %eax
pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp, 0
@@ -627,7 +650,6 @@ error_code:
pushl %edx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET edx, 0
- decl %eax # eax = -1
pushl %ecx
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ecx, 0
@@ -635,18 +657,20 @@ error_code:
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebx, 0
cld
- pushl %es
+ pushl %gs
CFI_ADJUST_CFA_OFFSET 4
- /*CFI_REL_OFFSET es, 0*/
+ /*CFI_REL_OFFSET gs, 0*/
+ movl $(__KERNEL_PDA), %ecx
+ movl %ecx, %gs
UNWIND_ESPFIX_STACK
popl %ecx
CFI_ADJUST_CFA_OFFSET -4
/*CFI_REGISTER es, ecx*/
- movl ES(%esp), %edi # get the function address
- movl ORIG_EAX(%esp), %edx # get the error code
- movl %eax, ORIG_EAX(%esp)
- movl %ecx, ES(%esp)
- /*CFI_REL_OFFSET es, ES*/
+ movl PT_GS(%esp), %edi # get the function address
+ movl PT_ORIG_EAX(%esp), %edx # get the error code
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ mov %ecx, PT_GS(%esp)
+ /*CFI_REL_OFFSET gs, ES*/
movl $(__USER_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
@@ -682,7 +706,7 @@ ENTRY(device_not_available)
GET_CR0_INTO_EAX
testl $0x4, %eax # EM (math emulation bit)
jne device_not_available_emulate
- preempt_stop
+ preempt_stop(CLBR_ANY)
call math_state_restore
jmp ret_from_exception
device_not_available_emulate:
@@ -754,7 +778,7 @@ KPROBE_ENTRY(nmi)
cmpw $__ESPFIX_SS, %ax
popl %eax
CFI_ADJUST_CFA_OFFSET -4
- je nmi_16bit_stack
+ je nmi_espfix_stack
cmpl $sysenter_entry,(%esp)
je nmi_stack_fixup
pushl %eax
@@ -797,7 +821,7 @@ nmi_debug_stack_check:
FIX_STACK(24,nmi_stack_correct, 1)
jmp nmi_stack_correct
-nmi_16bit_stack:
+nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
* create the pointer to lss back
@@ -806,7 +830,6 @@ nmi_16bit_stack:
CFI_ADJUST_CFA_OFFSET 4
pushl %esp
CFI_ADJUST_CFA_OFFSET 4
- movzwl %sp, %esp
addw $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
@@ -817,11 +840,11 @@ nmi_16bit_stack:
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
- CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
- lss 12+4(%esp), %esp # back to 16bit stack
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
1: INTERRUPT_RETURN
CFI_ENDPROC
.section __ex_table,"a"
@@ -830,6 +853,19 @@ nmi_16bit_stack:
.previous
KPROBE_END(nmi)
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+1: iret
+.section __ex_table,"a"
+ .align 4
+ .long 1b,iret_exc
+.previous
+
+ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+#endif
+
KPROBE_ENTRY(int3)
RING0_INT_FRAME
pushl $-1 # mark this as an int
@@ -949,26 +985,27 @@ ENTRY(arch_unwind_init_running)
movl 4(%esp), %edx
movl (%esp), %ecx
leal 4(%esp), %eax
- movl %ebx, EBX(%edx)
+ movl %ebx, PT_EBX(%edx)
xorl %ebx, %ebx
- movl %ebx, ECX(%edx)
- movl %ebx, EDX(%edx)
- movl %esi, ESI(%edx)
- movl %edi, EDI(%edx)
- movl %ebp, EBP(%edx)
- movl %ebx, EAX(%edx)
- movl $__USER_DS, DS(%edx)
- movl $__USER_DS, ES(%edx)
- movl %ebx, ORIG_EAX(%edx)
- movl %ecx, EIP(%edx)
+ movl %ebx, PT_ECX(%edx)
+ movl %ebx, PT_EDX(%edx)
+ movl %esi, PT_ESI(%edx)
+ movl %edi, PT_EDI(%edx)
+ movl %ebp, PT_EBP(%edx)
+ movl %ebx, PT_EAX(%edx)
+ movl $__USER_DS, PT_DS(%edx)
+ movl $__USER_DS, PT_ES(%edx)
+ movl $0, PT_GS(%edx)
+ movl %ebx, PT_ORIG_EAX(%edx)
+ movl %ecx, PT_EIP(%edx)
movl 12(%esp), %ecx
- movl $__KERNEL_CS, CS(%edx)
- movl %ebx, EFLAGS(%edx)
- movl %eax, OLDESP(%edx)
+ movl $__KERNEL_CS, PT_CS(%edx)
+ movl %ebx, PT_EFLAGS(%edx)
+ movl %eax, PT_OLDESP(%edx)
movl 8(%esp), %eax
movl %ecx, 8(%esp)
- movl EBX(%edx), %ebx
- movl $__KERNEL_DS, OLDSS(%edx)
+ movl PT_EBX(%edx), %ebx
+ movl $__KERNEL_DS, PT_OLDSS(%edx)
jmpl *%eax
CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
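
Both the CONFIG_PARAVIRT test in ldt_ss (paravirt_ops+PARAVIRT_enabled) and the new native_iret / native_irq_enable_sysexit stubs refer to a table of function pointers that a hypervisor-specific back-end fills in. A minimal, hypothetical slice of that table, limited to the hooks this file is concerned with and with field names assumed for illustration:

	struct paravirt_ops {
		int paravirt_enabled;			/* tested via PARAVIRT_enabled */
		void (*irq_disable)(void);		/* DISABLE_INTERRUPTS(clobbers) */
		void (*irq_enable)(void);		/* ENABLE_INTERRUPTS(clobbers) */
		void (*irq_enable_sysexit)(void);	/* native_irq_enable_sysexit */
		void (*iret)(void);			/* native_iret */
		/* ...the real structure carries many more operations... */
	};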