author		Tony Luck <tony.luck@intel.com>		2008-07-17 10:53:37 -0700
committer	Tony Luck <tony.luck@intel.com>		2008-07-17 10:53:37 -0700
commit		fca515fbfa5ecd9f7b54db311317e2c877d7831a (patch)
tree		66b44028b3ab5be068be78650932812520d78865 /arch
parent		2b04be7e8ab5756ea36e137dd03c8773d184e67e (diff)
parent		4d58bbcc89e267d52b4df572acbf209a60a8a497 (diff)
Pull pvops into release branch
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Makefile                |    6
-rw-r--r--  arch/ia64/kernel/Makefile         |   44
-rw-r--r--  arch/ia64/kernel/entry.S          |  115
-rw-r--r--  arch/ia64/kernel/head.S           |   41
-rw-r--r--  arch/ia64/kernel/iosapic.c        |   45
-rw-r--r--  arch/ia64/kernel/irq_ia64.c       |   19
-rw-r--r--  arch/ia64/kernel/ivt.S            |  462
-rw-r--r--  arch/ia64/kernel/minstate.h       |   13
-rw-r--r--  arch/ia64/kernel/nr-irqs.c        |   24
-rw-r--r--  arch/ia64/kernel/paravirt.c       |  369
-rw-r--r--  arch/ia64/kernel/paravirt_inst.h  |   29
-rw-r--r--  arch/ia64/kernel/paravirtentry.S  |   60
-rw-r--r--  arch/ia64/kernel/setup.c          |   10
-rw-r--r--  arch/ia64/kernel/smpboot.c        |    2
-rw-r--r--  arch/ia64/kernel/time.c           |   23
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S    |    1
16 files changed, 960 insertions, 303 deletions
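
The merged series applies the pv_ops pattern to ia64 at two levels: at the C
level, privileged operations go through function-pointer tables (pv_cpu_ops,
pv_iosapic_ops, pv_irq_ops, pv_time_ops) that default to the native
implementations; at the assembly level, raw privileged instructions are
replaced by macros (MOV_FROM_IFA, RFI, ...) whose expansion is chosen at
build time. A minimal C sketch of the table half, simplified from the
pv_irq_ops definitions in the diffs below (illustration only, not part of
the patch):

	/* simplified from pv_irq_ops; illustration only */
	struct pv_irq_ops_sketch {
		int  (*assign_irq_vector)(int irq);
		void (*free_irq_vector)(int vector);
	};

	/* native backends (the real ones live in irq_ia64.c below) */
	extern int  ia64_native_assign_irq_vector(int irq);
	extern void ia64_native_free_irq_vector(int vector);

	/* boots pointing at bare-metal code; a hypervisor repoints it early */
	struct pv_irq_ops_sketch pv_irq_ops_sketch = {
		.assign_irq_vector = ia64_native_assign_irq_vector,
		.free_irq_vector   = ia64_native_free_irq_vector,
	};

	/* callers always go through the table, never the backend directly */
	static inline int assign_irq_vector(int irq)
	{
		return pv_irq_ops_sketch.assign_irq_vector(irq);
	}
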
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e67ee3f2769..905d25b13d5 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -100,3 +100,9 @@ define archhelp
echo ' boot - Build vmlinux and bootloader for Ski simulator'
echo '* unwcheck - Check vmlinux for invalid unwind info'
endef
+
+archprepare: make_nr_irqs_h FORCE
+PHONY += make_nr_irqs_h FORCE
+
+make_nr_irqs_h: FORCE
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 13fd10e8699..87fea11aecb 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -36,6 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o
+
obj-$(CONFIG_IA64_ESI) += esi.o
ifneq ($(CONFIG_IA64_ESI),)
obj-y += esi_stub.o # must be in kernel proper
@@ -70,3 +72,45 @@ $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/gate-data.o: $(obj)/gate.so
+
+# Calculate NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
+define sed-y
+ "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+quiet_cmd_nr_irqs = GEN $@
+define cmd_nr_irqs
+ (set -e; \
+ echo "#ifndef __ASM_NR_IRQS_H__"; \
+ echo "#define __ASM_NR_IRQS_H__"; \
+ echo "/*"; \
+ echo " * DO NOT MODIFY."; \
+ echo " *"; \
+ echo " * This file was generated by Kbuild"; \
+ echo " *"; \
+ echo " */"; \
+ echo ""; \
+ sed -ne $(sed-y) $<; \
+ echo ""; \
+ echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
+ $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+ $(Q)mkdir -p $(dir $@)
+ $(call if_changed_dep,cc_s_c)
+
+include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+ $(Q)mkdir -p $(dir $@)
+ $(call cmd,nr_irqs)
+
+clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
+
+#
+# native ivt.S and entry.S
+#
+ASM_PARAVIRT_OBJS = ivt.o entry.o
+define paravirtualized_native
+AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
+endef
+$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
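
The sed-y expression turns lines of the form "->NAME value comment" (emitted
into the compiler's .s output by the DEFINE() macro from linux/kbuild.h) into
"#define NAME value /* comment */", mirroring the asm-offsets mechanism, and
the foreach/eval loop adds -D__IA64_ASM_PARAVIRTUALIZED_NATIVE to the
assembler flags of ivt.o and entry.o so the in-kernel objects get the native
flavor of the paravirt macros. For illustration, the generated
include/asm-ia64/nr-irqs.h would look roughly like this (the concrete value
is an assumption; it depends on the config):

	#ifndef __ASM_NR_IRQS_H__
	#define __ASM_NR_IRQS_H__
	/*
	 * DO NOT MODIFY.
	 *
	 * This file was generated by Kbuild
	 *
	 */

	#define NR_IRQS 1024 /* sizeof(union paravirt_nr_irqs_max) */

	#endif
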
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ca2bb95726d..56ab156c48a 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -23,6 +23,11 @@
* 11/07/2000
*/
/*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ * pv_ops.
+ */
+/*
* Global (preserved) predicate usage on syscall entry/exit path:
*
* pKStk: See entry.h.
@@ -45,6 +50,7 @@
#include "minstate.h"
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
* execve() is special because in case of success, we need to
* setup a null register window frame.
@@ -173,6 +179,7 @@ GLOBAL_ENTRY(sys_clone)
mov rp=loc0
br.ret.sptk.many rp
END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
/*
* prev_task <- ia64_switch_to(struct task_struct *next)
@@ -180,7 +187,7 @@ END(sys_clone)
* called. The code starting at .map relies on this. The rest of the code
* doesn't care about the interrupt masking status.
*/
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
.prologue
alloc r16=ar.pfs,1,0,0,0
DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
;;
.done:
ld8 sp=[r21] // load kernel stack pointer of new task
- mov IA64_KR(CURRENT)=in0 // update "current" application register
+ MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register
mov r8=r13 // return pointer to previously running task
mov r13=in0 // set "current" pointer
;;
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
br.ret.sptk.many rp // boogie on out in new context
.map:
- rsm psr.ic // interrupts (psr.i) are already disabled here
+ RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here
movl r25=PAGE_KERNEL
;;
srlz.d
or r23=r25,r20 // construct PA | page properties
mov r25=IA64_GRANULE_SHIFT<<2
;;
- mov cr.itir=r25
- mov cr.ifa=in0 // VA of next task...
+ MOV_TO_ITIR(p0, r25, r8)
+ MOV_TO_IFA(in0, r8) // VA of next task...
;;
mov r25=IA64_TR_CURRENT_STACK
- mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
+ MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped...
;;
itr.d dtr[r25]=r23 // wire in new mapping...
- ssm psr.ic // reenable the psr.ic bit
- ;;
- srlz.d
+ SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit
br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
* Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
* means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
* - b7 holds address to return to
* - must not touch r8-r11
*/
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
.prologue
.altrp b7
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.ret3:
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
(pUStk) rsm psr.i // disable interrupts
- br.cond.sptk .work_pending_syscall_end
+ br.cond.sptk ia64_work_pending_syscall_end
strace_error:
ld8 r3=[r2] // load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
mov r10=r0 // clear error indication in r10
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+ ;;
+ br.cond.sptk.few ia64_leave_syscall
+ ;;
+#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
/*
* ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
* need to switch to bank 0 and doesn't restore the scratch registers.
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
* ar.csd: cleared
* ar.ssd: cleared
*/
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
PT_REGS_UNWIND_INFO(0)
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
* extra work. We always check for extra work when returning to user-level.
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
* is 0. After extra work processing has been completed, execution
- * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+ * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
+ RSM_PSR_I(p0, r2, r18) // disable interrupts
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
;;
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
-(pUStk) rsm psr.i
+ RSM_PSR_I(pUStk, r2, r18)
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
-.work_processed_syscall:
+.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
adds r2=PT(LOADRS)+16,r12
(pUStk) mov.m r22=ar.itc // fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
;;
invala // M0|1 invalidate ALAT
- rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection
+ RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection
cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
ld8 r29=[r2],16 // M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
;;
#endif
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+ MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled
nop 0
;;
ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)
srlz.d // M0 ensure interruption collection is off (for cover)
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
- cover // B add current frame into dirty partition & set cr.ifs
+ COVER // B add current frame into dirty partition & set cr.ifs
;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
mov r19=ar.bsp // M2 get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
mov.m ar.ssd=r0 // M2 clear ar.ssd
mov f11=f0 // F clear f11
br.cond.sptk.many rbs_switch // B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+ ;;
+ // don't fall through, ia64_leave_kernel may be #define'd
+ br.cond.sptk.few ia64_leave_kernel
+ ;;
+#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
// fall through
+#endif
#endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
PT_REGS_UNWIND_INFO(0)
/*
* work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
- rsm psr.i // disable interrupts
+ RSM_PSR_I(p0, r17, r31) // disable interrupts
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
;;
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else
-(pUStk) rsm psr.i
+ RSM_PSR_I(pUStk, r17, r31)
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
mov ar.csd=r30
mov ar.ssd=r31
;;
- rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
+ RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection
invala // invalidate ALAT
;;
ld8.fill r22=[r2],24
@@ -942,7 +969,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
mov ar.ccv=r15
;;
ldf.fill f11=[r2]
- bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
+ BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...)
;;
(pUStk) mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
adds r16=PT(CR_IPSR)+16,r12
@@ -950,12 +977,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.pred.rel.mutex pUStk,pKStk
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+ MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
(pUStk) mov.m r22=ar.itc // M fetch time at leave
nop.i 0
;;
#else
-(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
+ MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
nop.i 0
nop.i 0
;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
(pNonSys) br.cond.dpnt dont_preserve_current_frame
- cover // add current frame into dirty partition and set cr.ifs
+ COVER // add current frame into dirty partition and set cr.ifs
;;
mov r19=ar.bsp // get new backing store pointer
rbs_switch:
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
(pKStk) dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
;;
- mov cr.ipsr=r29 // M2
+ MOV_TO_IPSR(p0, r29, r25) // M2
mov ar.pfs=r26 // I0
(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise
-(p9) mov cr.ifs=r30 // M2
+ MOV_TO_IFS(p9, r30, r25)// M2
mov b0=r21 // I0
(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise
mov ar.fpsr=r20 // M2
- mov cr.iip=r28 // M2
+ MOV_TO_IIP(r28, r25) // M2
nop 0
;;
(pUStk) mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
@@ -1148,7 +1175,7 @@ skip_rbs_switch:
mov ar.rsc=r27 // M2
mov pr=r31,-1 // I0
- rfi // B
+ RFI // B
/*
* On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
;;
(pKStk) st4 [r20]=r21
#endif
- ssm psr.i // enable interrupts
+ SSM_PSR_I(p0, p6, r2) // enable interrupts
br.call.spnt.many rp=schedule
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
- rsm psr.i // disable interrupts
+ RSM_PSR_I(p0, r2, r20) // disable interrupts
;;
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
br.cond.sptk.many .work_processed_kernel
.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
br.cond.sptk.many .work_processed_kernel
-.work_pending_syscall_end:
+.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
adds r2=PT(R8)+16,r12
adds r3=PT(R10)+16,r12
;;
ld8 r8=[r2]
ld8 r10=[r3]
- br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+ br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
ENTRY(handle_syscall_error)
/*
* Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
* We declare 8 input registers so the system call args get preserved,
* in case we need to restart a system call.
*/
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
adds sp=16,sp
;;
ld8 r9=[sp] // load new ar.unat
- mov.sptk b7=r8,ia64_leave_kernel
+ mov.sptk b7=r8,ia64_native_leave_kernel
;;
mov ar.unat=r9
br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
data8 sys_timerfd_gettime
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
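
The __paravirt_* labels and the "ia64_leave_kernel may be #define'd" comments
rely on paravirt_inst.h (added below): when entry.S is assembled with
-D__IA64_ASM_PARAVIRTUALIZED_NATIVE, asm/native/inst.h is expected to map the
generic names back to the traditional native symbols, roughly like this
(a sketch of the companion header, not part of this diff):

	/* assumed symbol mapping in asm/native/inst.h */
	#define __paravirt_switch_to			ia64_native_switch_to
	#define __paravirt_leave_syscall		ia64_native_leave_syscall
	#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
	#define __paravirt_leave_kernel			ia64_native_leave_kernel

Because another flavor may redefine ia64_leave_syscall/ia64_leave_kernel to
point at its own code, paths that used to fall through now end in an explicit
branch under CONFIG_PARAVIRT.
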
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index ddeab4e36fd..db540e58c78 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -26,11 +26,14 @@
#include <asm/mmu_context.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
+#include <asm/paravirt.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/mca_asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
#ifdef CONFIG_HOTPLUG_CPU
#define SAL_PSR_BITS_TO_SET \
@@ -367,6 +370,44 @@ start_ap:
;;
(isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
+#ifdef CONFIG_PARAVIRT
+
+ movl r14=hypervisor_setup_hooks
+ movl r15=hypervisor_type
+ mov r16=num_hypervisor_hooks
+ ;;
+ ld8 r2=[r15]
+ ;;
+ cmp.ltu p7,p0=r2,r16 // array size check
+ shladd r8=r2,3,r14
+ ;;
+(p7) ld8 r9=[r8]
+ ;;
+(p7) mov b1=r9
+(p7) cmp.ne.unc p7,p0=r9,r0 // no actual branch to NULL
+ ;;
+(p7) br.call.sptk.many rp=b1
+
+ __INITDATA
+
+default_setup_hook = 0 // Currently nothing needs to be done.
+
+ .weak xen_setup_hook
+
+ .global hypervisor_type
+hypervisor_type:
+ data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
+
+ // must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
+
+hypervisor_setup_hooks:
+ data8 default_setup_hook
+ data8 xen_setup_hook
+num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
+ .previous
+
+#endif
+
#ifdef CONFIG_SMP
(isAP) br.call.sptk.many rp=start_secondary
.ret0:
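
The hook table added to head.S selects a per-hypervisor setup routine by
indexing with hypervisor_type; a C rendering of the same dispatch
(illustration only):

	typedef void (*setup_hook_t)(void);

	extern unsigned long hypervisor_type;		/* PARAVIRT_HYPERVISOR_TYPE_* */
	extern setup_hook_t hypervisor_setup_hooks[];	/* indexed by type */
	extern unsigned long num_hypervisor_hooks;

	static void run_setup_hook(void)
	{
		setup_hook_t hook;

		if (hypervisor_type >= num_hypervisor_hooks)	/* array size check */
			return;
		hook = hypervisor_setup_hooks[hypervisor_type];
		if (hook)					/* no actual branch to NULL */
			hook();
	}
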
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 39752cdef6f..3bc2fa64f87 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -585,6 +585,15 @@ static inline int irq_is_shared (int irq)
return (iosapic_intr_info[irq].count > 1);
}
+struct irq_chip*
+ia64_native_iosapic_get_irq_chip(unsigned long trigger)
+{
+ if (trigger == IOSAPIC_EDGE)
+ return &irq_type_iosapic_edge;
+ else
+ return &irq_type_iosapic_level;
+}
+
static int
register_intr (unsigned int gsi, int irq, unsigned char delivery,
unsigned long polarity, unsigned long trigger)
@@ -635,13 +644,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
iosapic_intr_info[irq].dmode = delivery;
iosapic_intr_info[irq].trigger = trigger;
- if (trigger == IOSAPIC_EDGE)
- irq_type = &irq_type_iosapic_edge;
- else
- irq_type = &irq_type_iosapic_level;
+ irq_type = iosapic_get_irq_chip(trigger);
idesc = irq_desc + irq;
- if (idesc->chip != irq_type) {
+ if (irq_type != NULL && idesc->chip != irq_type) {
if (idesc->chip != &no_irq_type)
printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n",
@@ -974,6 +980,22 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
}
void __init
+ia64_native_iosapic_pcat_compat_init(void)
+{
+ if (pcat_compat) {
+ /*
+ * Disable the compatibility mode interrupts (8259 style),
+ * needs IN/OUT support enabled.
+ */
+ printk(KERN_INFO
+ "%s: Disabling PC-AT compatible 8259 interrupts\n",
+ __func__);
+ outb(0xff, 0xA1);
+ outb(0xff, 0x21);
+ }
+}
+
+void __init
iosapic_system_init (int system_pcat_compat)
{
int irq;
@@ -987,17 +1009,8 @@ iosapic_system_init (int system_pcat_compat)
}
pcat_compat = system_pcat_compat;
- if (pcat_compat) {
- /*
- * Disable the compatibility mode interrupts (8259 style),
- * needs IN/OUT support enabled.
- */
- printk(KERN_INFO
- "%s: Disabling PC-AT compatible 8259 interrupts\n",
- __func__);
- outb(0xff, 0xA1);
- outb(0xff, 0x21);
- }
+ if (pcat_compat)
+ iosapic_pcat_compat_init();
}
static inline int
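
register_intr() and iosapic_system_init() now go through
iosapic_get_irq_chip() and iosapic_pcat_compat_init() instead of open-coding
the native behavior; the added NULL check in register_intr() lets a flavor's
get_irq_chip return NULL to keep the existing chip. A plausible shape for the
wrappers, assuming they dispatch through the pv_iosapic_ops table added in
paravirt.c below (sketch, not part of this diff):

	/* assumed wrappers in asm/iosapic.h under CONFIG_PARAVIRT */
	static inline void iosapic_pcat_compat_init(void)
	{
		pv_iosapic_ops.pcat_compat_init();
	}

	static inline struct irq_chip *iosapic_get_irq_chip(unsigned long trigger)
	{
		return pv_iosapic_ops.get_irq_chip(trigger);
	}
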
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5538471e8d6..28d3d483db9 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -196,7 +196,7 @@ static void clear_irq_vector(int irq)
}
int
-assign_irq_vector (int irq)
+ia64_native_assign_irq_vector (int irq)
{
unsigned long flags;
int vector, cpu;
@@ -222,7 +222,7 @@ assign_irq_vector (int irq)
}
void
-free_irq_vector (int vector)
+ia64_native_free_irq_vector (int vector)
{
if (vector < IA64_FIRST_DEVICE_VECTOR ||
vector > IA64_LAST_DEVICE_VECTOR)
@@ -600,7 +600,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
{
BUG();
}
-extern irqreturn_t handle_IPI (int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
.handler = handle_IPI,
@@ -623,7 +622,7 @@ static struct irqaction tlb_irqaction = {
#endif
void
-register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
irq_desc_t *desc;
unsigned int irq;
@@ -638,13 +637,21 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
}
void __init
-init_IRQ (void)
+ia64_native_register_ipi(void)
{
- register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#endif
+}
+
+void __init
+init_IRQ (void)
+{
+ ia64_register_ipi();
+ register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
if (vector_domain_type != VECTOR_DOMAIN_NONE) {
BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
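
IPI registration moves out of init_IRQ() into ia64_native_register_ipi() so a
hypervisor flavor can substitute its own event-channel setup; init_IRQ()
reaches it through an indirection that presumably looks like this (sketch,
assuming the pv_irq_ops table added in paravirt.c below):

	/* assumed dispatch wrapper */
	static inline void ia64_register_ipi(void)
	{
		pv_irq_ops.register_ipi();
	}
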
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 80b44ea052d..c39627df3cd 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -12,6 +12,14 @@
*
* 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
* 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
+ *
+ * Copyright (C) 2005 Hewlett-Packard Co
+ * Dan Magenheimer <dan.magenheimer@hp.com>
+ * Xen paravirtualization
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ * pv_ops.
+ * Yaozu (Eddie) Dong <eddie.dong@intel.com>
*/
/*
* This file defines the interruption vector table used by the CPU.
@@ -102,13 +110,13 @@ ENTRY(vhpt_miss)
* - the faulting virtual address uses unimplemented address bits
* - the faulting virtual address has no valid page table mapping
*/
- mov r16=cr.ifa // get address that caused the TLB miss
+ MOV_FROM_IFA(r16) // get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
movl r18=PAGE_SHIFT
- mov r25=cr.itir
+ MOV_FROM_ITIR(r25)
#endif
;;
- rsm psr.dt // use physical addressing for data
+ RSM_PSR_DT // use physical addressing for data
mov r31=pr // save the predicate registers
mov r19=IA64_KR(PT_BASE) // get page table base address
shl r21=r16,3 // shift bit 60 into sign bit
@@ -168,21 +176,21 @@ ENTRY(vhpt_miss)
dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
;;
(p7) ld8 r18=[r21] // read *pte
- mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss
+ MOV_FROM_ISR(r19) // cr.isr bit 32 tells us if this is an insn miss
;;
(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
- mov r22=cr.iha // get the VHPT address that caused the TLB miss
+ MOV_FROM_IHA(r22) // get the VHPT address that caused the TLB miss
;; // avoid RAW on p7
(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
;;
-(p10) itc.i r18 // insert the instruction TLB entry
-(p11) itc.d r18 // insert the data TLB entry
+ ITC_I_AND_D(p10, p11, r18, r24) // insert the instruction TLB entry and
+ // insert the data TLB entry
(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
- mov cr.ifa=r22
+ MOV_TO_IFA(r22, r24)
#ifdef CONFIG_HUGETLB_PAGE
-(p8) mov cr.itir=r25 // change to default page-size for VHPT
+ MOV_TO_ITIR(p8, r25, r24) // change to default page-size for VHPT
#endif
/*
@@ -192,7 +200,7 @@ ENTRY(vhpt_miss)
*/
adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
;;
-(p7) itc.d r24
+ ITC_D(p7, r24, r25)
;;
#ifdef CONFIG_SMP
/*
@@ -234,7 +242,7 @@ ENTRY(vhpt_miss)
#endif
mov pr=r31,-1 // restore predicate registers
- rfi
+ RFI
END(vhpt_miss)
.org ia64_ivt+0x400
@@ -248,11 +256,11 @@ ENTRY(itlb_miss)
* mode, walk the page table, and then re-execute the PTE read and
* go on normally after that.
*/
- mov r16=cr.ifa // get virtual address
+ MOV_FROM_IFA(r16) // get virtual address
mov r29=b0 // save b0
mov r31=pr // save predicates
.itlb_fault:
- mov r17=cr.iha // get virtual address of PTE
+ MOV_FROM_IHA(r17) // get virtual address of PTE
movl r30=1f // load nested fault continuation point
;;
1: ld8 r18=[r17] // read *pte
@@ -261,7 +269,7 @@ ENTRY(itlb_miss)
tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
(p6) br.cond.spnt page_fault
;;
- itc.i r18
+ ITC_I(p0, r18, r19)
;;
#ifdef CONFIG_SMP
/*
@@ -278,7 +286,7 @@ ENTRY(itlb_miss)
(p7) ptc.l r16,r20
#endif
mov pr=r31,-1
- rfi
+ RFI
END(itlb_miss)
.org ia64_ivt+0x0800
@@ -292,11 +300,11 @@ ENTRY(dtlb_miss)
* mode, walk the page table, and then re-execute the PTE read and
* go on normally after that.
*/
- mov r16=cr.ifa // get virtual address
+ MOV_FROM_IFA(r16) // get virtual address
mov r29=b0 // save b0
mov r31=pr // save predicates
dtlb_fault:
- mov r17=cr.iha // get virtual address of PTE
+ MOV_FROM_IHA(r17) // get virtual address of PTE
movl r30=1f // load nested fault continuation point
;;
1: ld8 r18=[r17] // read *pte
@@ -305,7 +313,7 @@ dtlb_fault:
tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
(p6) br.cond.spnt page_fault
;;
- itc.d r18
+ ITC_D(p0, r18, r19)
;;
#ifdef CONFIG_SMP
/*
@@ -322,7 +330,7 @@ dtlb_fault:
(p7) ptc.l r16,r20
#endif
mov pr=r31,-1
- rfi
+ RFI
END(dtlb_miss)
.org ia64_ivt+0x0c00
@@ -330,9 +338,9 @@ END(dtlb_miss)
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
DBG_FAULT(3)
- mov r16=cr.ifa // get address that caused the TLB miss
+ MOV_FROM_IFA(r16) // get address that caused the TLB miss
movl r17=PAGE_KERNEL
- mov r21=cr.ipsr
+ MOV_FROM_IPSR(p0, r21)
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
mov r31=pr
;;
@@ -341,9 +349,9 @@ ENTRY(alt_itlb_miss)
;;
cmp.gt p8,p0=6,r22 // user mode
;;
-(p8) thash r17=r16
+ THASH(p8, r17, r16, r23)
;;
-(p8) mov cr.iha=r17
+ MOV_TO_IHA(p8, r17, r23)
(p8) mov r29=b0 // save b0
(p8) br.cond.dptk .itlb_fault
#endif
@@ -358,9 +366,9 @@ ENTRY(alt_itlb_miss)
or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
(p8) br.cond.spnt page_fault
;;
- itc.i r19 // insert the TLB entry
+ ITC_I(p0, r19, r18) // insert the TLB entry
mov pr=r31,-1
- rfi
+ RFI
END(alt_itlb_miss)
.org ia64_ivt+0x1000
@@ -368,11 +376,11 @@ END(alt_itlb_miss)
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
DBG_FAULT(4)
- mov r16=cr.ifa // get address that caused the TLB miss
+ MOV_FROM_IFA(r16) // get address that caused the TLB miss
movl r17=PAGE_KERNEL
- mov r20=cr.isr
+ MOV_FROM_ISR(r20)
movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
- mov r21=cr.ipsr
+ MOV_FROM_IPSR(p0, r21)
mov r31=pr
mov r24=PERCPU_ADDR
;;
@@ -381,9 +389,9 @@ ENTRY(alt_dtlb_miss)
;;
cmp.gt p8,p0=6,r22 // access to region 0-5
;;
-(p8) thash r17=r16
+ THASH(p8, r17, r16, r25)
;;
-(p8) mov cr.iha=r17
+ MOV_TO_IHA(p8, r17, r25)
(p8) mov r29=b0 // save b0
(p8) br.cond.dptk dtlb_fault
#endif
@@ -402,7 +410,7 @@ ENTRY(alt_dtlb_miss)
tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
;;
(p10) sub r19=r19,r26
-(p10) mov cr.itir=r25
+ MOV_TO_ITIR(p10, r25, r24)
cmp.ne p8,p0=r0,r23
(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
(p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr
@@ -411,11 +419,11 @@ ENTRY(alt_dtlb_miss)
dep r21=-1,r21,IA64_PSR_ED_BIT,1
;;
or r19=r19,r17 // insert PTE control bits into r19
-(p6) mov cr.ipsr=r21
+ MOV_TO_IPSR(p6, r21, r24)
;;
-(p7) itc.d r19 // insert the TLB entry
+ ITC_D(p7, r19, r18) // insert the TLB entry
mov pr=r31,-1
- rfi
+ RFI
END(alt_dtlb_miss)
.org ia64_ivt+0x1400
@@ -444,10 +452,10 @@ ENTRY(nested_dtlb_miss)
*
* Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared)
*/
- rsm psr.dt // switch to using physical data addressing
+ RSM_PSR_DT // switch to using physical data addressing
mov r19=IA64_KR(PT_BASE) // get the page table base address
shl r21=r16,3 // shift bit 60 into sign bit
- mov r18=cr.itir
+ MOV_FROM_ITIR(r18)
;;
shr.u r17=r16,61 // get the region number into r17
extr.u r18=r18,2,6 // get the faulting page size
@@ -507,33 +515,6 @@ ENTRY(ikey_miss)
FAULT(6)
END(ikey_miss)
- //-----------------------------------------------------------------------------------
- // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
-ENTRY(page_fault)
- ssm psr.dt
- ;;
- srlz.i
- ;;
- SAVE_MIN_WITH_COVER
- alloc r15=ar.pfs,0,0,3,0
- mov out0=cr.ifa
- mov out1=cr.isr
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collectin is on
- ;;
-(p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_kernel
- ;;
- SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12 // out2 = pointer to pt_regs
- br.call.sptk.many b6=ia64_do_page_fault // ignore return address
-END(page_fault)
-
.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
@@ -556,10 +537,10 @@ ENTRY(dirty_bit)
* page table TLB entry isn't present, we take a nested TLB miss hit where we look
* up the physical address of the L3 PTE and then continue at label 1 below.
*/
- mov r16=cr.ifa // get the address that caused the fault
+ MOV_FROM_IFA(r16) // get the address that caused the fault
movl r30=1f // load continuation point in case of nested fault
;;
- thash r17=r16 // compute virtual address of L3 PTE
+ THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
mov r29=b0 // save b0 in case of nested fault
mov r31=pr // save pr
#ifdef CONFIG_SMP
@@ -576,7 +557,7 @@ ENTRY(dirty_bit)
;;
(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
;;
-(p6) itc.d r25 // install updated PTE
+ ITC_D(p6, r25, r18) // install updated PTE
;;
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -602,7 +583,7 @@ ENTRY(dirty_bit)
itc.d r18 // install updated PTE
#endif
mov pr=r31,-1 // restore pr
- rfi
+ RFI
END(dirty_bit)
.org ia64_ivt+0x2400
@@ -611,22 +592,22 @@ END(dirty_bit)
ENTRY(iaccess_bit)
DBG_FAULT(9)
// Like Entry 8, except for instruction access
- mov r16=cr.ifa // get the address that caused the fault
+ MOV_FROM_IFA(r16) // get the address that caused the fault
movl r30=1f // load continuation point in case of nested fault
mov r31=pr // save predicates
#ifdef CONFIG_ITANIUM
/*
* Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
*/
- mov r17=cr.ipsr
+ MOV_FROM_IPSR(p0, r17)
;;
- mov r18=cr.iip
+ MOV_FROM_IIP(r18)
tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
;;
(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
;;
- thash r17=r16 // compute virtual address of L3 PTE
+ THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
mov r29=b0 // save b0 in case of nested fault)
#ifdef CONFIG_SMP
mov r28=ar.ccv // save ar.ccv
@@ -642,7 +623,7 @@ ENTRY(iaccess_bit)
;;
(p6) cmp.eq p6,p7=r26,r18 // Only if page present
;;
-(p6) itc.i r25 // install updated PTE
+ ITC_I(p6, r25, r26) // install updated PTE
;;
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
@@ -668,7 +649,7 @@ ENTRY(iaccess_bit)
itc.i r18 // install updated PTE
#endif /* !CONFIG_SMP */
mov pr=r31,-1
- rfi
+ RFI
END(iaccess_bit)
.org ia64_ivt+0x2800
@@ -677,10 +658,10 @@ END(iaccess_bit)
ENTRY(daccess_bit)
DBG_FAULT(10)
// Like Entry 8, except for data access
- mov r16=cr.ifa // get the address that caused the fault
+ MOV_FROM_IFA(r16) // get the address that caused the fault
movl r30=1f // load continuation point in case of nested fault
;;
- thash r17=r16 // compute virtual address of L3 PTE
+ THASH(p0, r17, r16, r18) // compute virtual address of L3 PTE
mov r31=pr
mov r29=b0 // save b0 in case of nested fault)
#ifdef CONFIG_SMP
@@ -697,7 +678,7 @@ ENTRY(daccess_bit)
;;
(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
;;
-(p6) itc.d r25 // install updated PTE
+ ITC_D(p6, r25, r26) // install updated PTE
/*
* Tell the assemblers dependency-violation checker that the above "itc" instructions
* cannot possibly affect the following loads:
@@ -721,7 +702,7 @@ ENTRY(daccess_bit)
#endif
mov b0=r29 // restore b0
mov pr=r31,-1
- rfi
+ RFI
END(daccess_bit)
.org ia64_ivt+0x2c00
@@ -745,10 +726,10 @@ ENTRY(break_fault)
*/
DBG_FAULT(11)
mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
- mov r29=cr.ipsr // M2 (12 cyc)
+ MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
mov r31=pr // I0 (2 cyc)
- mov r17=cr.iim // M2 (2 cyc)
+ MOV_FROM_IIM(r17) // M2 (2 cyc)
mov.m r27=ar.rsc // M2 (12 cyc)
mov r18=__IA64_BREAK_SYSCALL // A
@@ -767,7 +748,7 @@ ENTRY(break_fault)
nop.m 0
movl r30=sys_call_table // X
- mov r28=cr.iip // M2 (2 cyc)
+ MOV_FROM_IIP(r28) // M2 (2 cyc)
cmp.eq p0,p7=r18,r17 // I0 is this a system call?
(p7) br.cond.spnt non_syscall // B no ->
//
@@ -864,18 +845,17 @@ ENTRY(break_fault)
#endif
mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
nop 0
- bsw.1 // B (6 cyc) regs are saved, switch to bank 1
+ BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
;;
- ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
+ // M0 ensure interruption collection is on
movl r3=ia64_ret_from_syscall // X
;;
-
- srlz.i // M0 ensure interruption collection is on
mov rp=r3 // I0 set the real return addr
(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
-(p15) ssm psr.i // M2 restore psr.i
+ SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
(p14) br.call.sptk.many b6=b6 // B invoke syscall-handker (ignore return addr)
br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
// NOT REACHED
@@ -895,27 +875,8 @@ END(break_fault)
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
- DBG_FAULT(12)
- mov r31=pr // prepare to save predicates
- ;;
- SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- srlz.i // ensure everybody knows psr.ic is back on
- ;;
- SAVE_REST
- ;;
- MCA_RECOVER_RANGE(interrupt)
- alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
- mov out0=cr.ivr // pass cr.ivr as first arg
- add out1=16,sp // pass pointer to pt_regs as second arg
- ;;
- srlz.d // make sure we see the effect of cr.ivr
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_handle_irq
+ /* interrupt handler has become too big to fit this area. */
+ br.sptk.many __interrupt
END(interrupt)
.org ia64_ivt+0x3400
@@ -978,6 +939,7 @@ END(interrupt)
* - ar.fpsr: set to kernel settings
* - b6: preserved (same as on entry)
*/
+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
@@ -1069,6 +1031,7 @@ GLOBAL_ENTRY(ia64_syscall_setup)
(p10) mov r8=-EINVAL
br.ret.sptk.many b7
END(ia64_syscall_setup)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1082,7 +1045,7 @@ END(ia64_syscall_setup)
DBG_FAULT(16)
FAULT(16)
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
/*
* There is no particular reason for this code to be here, other than
* that there happens to be space here that would go unused otherwise.
@@ -1092,7 +1055,7 @@ END(ia64_syscall_setup)
* account_sys_enter is called from SAVE_MIN* macros if accounting is
* enabled and if the macro is entered from user mode.
*/
-ENTRY(account_sys_enter)
+GLOBAL_ENTRY(account_sys_enter)
// mov.m r20=ar.itc is called in advance, and r13 is current
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
@@ -1123,110 +1086,18 @@ END(account_sys_enter)
DBG_FAULT(17)
FAULT(17)
-ENTRY(non_syscall)
- mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
- ;;
- SAVE_MIN_WITH_COVER
-
- // There is no particular reason for this code to be here, other than that
- // there happens to be space here that would go unused otherwise. If this
- // fault ever gets "unreserved", simply moved the following code to a more
- // suitable spot...
-
- alloc r14=ar.pfs,0,0,2,0
- mov out0=cr.iim
- add out1=16,sp
- adds r3=8,r2 // set up second base pointer for SAVE_REST
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- movl r15=ia64_leave_kernel
- ;;
- SAVE_REST
- mov rp=r15
- ;;
- br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
-END(non_syscall)
-
.org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
DBG_FAULT(18)
FAULT(18)
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
- * fault ever gets "unreserved", simply moved the following code to a more
- * suitable spot...
- */
-
-ENTRY(dispatch_unaligned_handler)
- SAVE_MIN_WITH_COVER
- ;;
- alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
- mov out0=cr.ifa
- adds out1=16,sp
-
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.sptk.many ia64_prepare_handle_unaligned
-END(dispatch_unaligned_handler)
-
.org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
DBG_FAULT(19)
FAULT(19)
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
- * fault ever gets "unreserved", simply moved the following code to a more
- * suitable spot...
- */
-
-ENTRY(dispatch_to_fault_handler)
- /*
- * Input:
- * psr.ic: off
- * r19: fault vector number (e.g., 24 for General Exception)
- * r31: contains saved predicates (pr)
- */
- SAVE_MIN_WITH_COVER_R19
- alloc r14=ar.pfs,0,0,5,0
- mov out0=r15
- mov out1=cr.isr
- mov out2=cr.ifa
- mov out3=cr.iim
- mov out4=cr.itir
- ;;
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i // restore psr.i
- adds r3=8,r2 // set up second base pointer for SAVE_REST
- ;;
- SAVE_REST
- movl r14=ia64_leave_kernel
- ;;
- mov rp=r14
- br.call.sptk.many b6=ia64_fault
-END(dispatch_to_fault_handler)
-
//
// --- End of long entries, Beginning of short entries
//
@@ -1236,8 +1107,8 @@ END(dispatch_to_fault_handler)
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
DBG_FAULT(20)
- mov r16=cr.ifa
- rsm psr.dt
+ MOV_FROM_IFA(r16)
+ RSM_PSR_DT
/*
* The Linux page fault handler doesn't expect non-present pages to be in
* the TLB. Flush the existing entry now, so we meet that expectation.
@@ -1256,8 +1127,8 @@ END(page_not_present)
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
DBG_FAULT(21)
- mov r16=cr.ifa
- rsm psr.dt
+ MOV_FROM_IFA(r16)
+ RSM_PSR_DT
mov r31=pr
;;
srlz.d
@@ -1269,8 +1140,8 @@ END(key_permission)
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
DBG_FAULT(22)
- mov r16=cr.ifa
- rsm psr.dt
+ MOV_FROM_IFA(r16)
+ RSM_PSR_DT
mov r31=pr
;;
srlz.d
@@ -1282,8 +1153,8 @@ END(iaccess_rights)
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
DBG_FAULT(23)
- mov r16=cr.ifa
- rsm psr.dt
+ MOV_FROM_IFA(r16)
+ RSM_PSR_DT
mov r31=pr
;;
srlz.d
@@ -1295,7 +1166,7 @@ END(daccess_rights)
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
DBG_FAULT(24)
- mov r16=cr.isr
+ MOV_FROM_ISR(r16)
mov r31=pr
;;
cmp4.eq p6,p0=0,r16
@@ -1324,8 +1195,8 @@ END(disabled_fp_reg)
ENTRY(nat_consumption)
DBG_FAULT(26)
- mov r16=cr.ipsr
- mov r17=cr.isr
+ MOV_FROM_IPSR(p0, r16)
+ MOV_FROM_ISR(r17)
mov r31=pr // save PR
;;
and r18=0xf,r17 // r18 = cr.ipsr.code{3:0}
@@ -1335,10 +1206,10 @@ ENTRY(nat_consumption)
dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6) br.cond.spnt 1f // branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH)
;;
- mov cr.ipsr=r16 // set cr.ipsr.na
+ MOV_TO_IPSR(p0, r16, r18)
mov pr=r31,-1
;;
- rfi
+ RFI
1: mov pr=r31,-1
;;
@@ -1360,26 +1231,26 @@ ENTRY(speculation_vector)
*
* cr.imm contains zero_ext(imm21)
*/
- mov r18=cr.iim
+ MOV_FROM_IIM(r18)
;;
- mov r17=cr.iip
+ MOV_FROM_IIP(r17)
shl r18=r18,43 // put sign bit in position (43=64-21)
;;
- mov r16=cr.ipsr
+ MOV_FROM_IPSR(p0, r16)
shr r18=r18,39 // sign extend (39=43-4)
;;
add r17=r17,r18 // now add the offset
;;
- mov cr.iip=r17
+ MOV_TO_IIP(r17, r19)
dep r16=0,r16,41,2 // clear EI
;;
- mov cr.ipsr=r16
+ MOV_TO_IPSR(p0, r16, r19)
;;
- rfi // and go back
+ RFI
END(speculation_vector)
.org ia64_ivt+0x5800
@@ -1517,11 +1388,11 @@ ENTRY(ia32_intercept)
DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
mov r31=pr
- mov r16=cr.isr
+ MOV_FROM_ISR(r16)
;;
extr.u r17=r16,16,8 // get ISR.code
mov r18=ar.eflag
- mov r19=cr.iim // old eflag value
+ MOV_FROM_IIM(r19) // old eflag value
;;
cmp.ne p6,p0=2,r17
(p6) br.cond.spnt 1f // not a system flag fault
@@ -1533,7 +1404,7 @@ ENTRY(ia32_intercept)
(p6) br.cond.spnt 1f // eflags.ac bit didn't change
;;
mov pr=r31,-1 // restore predicate registers
- rfi
+ RFI
1:
#endif // CONFIG_IA32_SUPPORT
@@ -1673,6 +1544,137 @@ END(ia32_interrupt)
DBG_FAULT(67)
FAULT(67)
+ //-----------------------------------------------------------------------------------
+ // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
+ENTRY(page_fault)
+ SSM_PSR_DT_AND_SRLZ_I
+ ;;
+ SAVE_MIN_WITH_COVER
+ alloc r15=ar.pfs,0,0,3,0
+ MOV_FROM_IFA(out0)
+ MOV_FROM_ISR(out1)
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
+ adds r3=8,r2 // set up second base pointer
+ SSM_PSR_I(p15, p15, r14) // restore psr.i
+ movl r14=ia64_leave_kernel
+ ;;
+ SAVE_REST
+ mov rp=r14
+ ;;
+ adds out2=16,r12 // out2 = pointer to pt_regs
+ br.call.sptk.many b6=ia64_do_page_fault // ignore return address
+END(page_fault)
+
+ENTRY(non_syscall)
+ mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
+ ;;
+ SAVE_MIN_WITH_COVER
+
+ // There is no particular reason for this code to be here, other than that
+ // there happens to be space here that would go unused otherwise. If this
+ // fault ever gets "unreserved", simply move the following code to a more
+ // suitable spot...
+
+ alloc r14=ar.pfs,0,0,2,0
+ MOV_FROM_IIM(out0)
+ add out1=16,sp
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
+ // guarantee that interruption collection is on
+ SSM_PSR_I(p15, p15, r15) // restore psr.i
+ movl r15=ia64_leave_kernel
+ ;;
+ SAVE_REST
+ mov rp=r15
+ ;;
+ br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
+END(non_syscall)
+
+ENTRY(__interrupt)
+ DBG_FAULT(12)
+ mov r31=pr // prepare to save predicates
+ ;;
+ SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
+ // ensure everybody knows psr.ic is back on
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ SAVE_REST
+ ;;
+ MCA_RECOVER_RANGE(interrupt)
+ alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
+ MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
+ add out1=16,sp // pass pointer to pt_regs as second arg
+ ;;
+ srlz.d // make sure we see the effect of cr.ivr
+ movl r14=ia64_leave_kernel
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=ia64_handle_irq
+END(__interrupt)
+
+ /*
+ * There is no particular reason for this code to be here, other than that
+ * there happens to be space here that would go unused otherwise. If this
+ * fault ever gets "unreserved", simply move the following code to a more
+ * suitable spot...
+ */
+
+ENTRY(dispatch_unaligned_handler)
+ SAVE_MIN_WITH_COVER
+ ;;
+ alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+ MOV_FROM_IFA(out0)
+ adds out1=16,sp
+
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
+ // guarantee that interruption collection is on
+ SSM_PSR_I(p15, p15, r3) // restore psr.i
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ SAVE_REST
+ movl r14=ia64_leave_kernel
+ ;;
+ mov rp=r14
+ br.sptk.many ia64_prepare_handle_unaligned
+END(dispatch_unaligned_handler)
+
+ /*
+ * There is no particular reason for this code to be here, other than that
+ * there happens to be space here that would go unused otherwise. If this
+ * fault ever gets "unreserved", simply move the following code to a more
+ * suitable spot...
+ */
+
+ENTRY(dispatch_to_fault_handler)
+ /*
+ * Input:
+ * psr.ic: off
+ * r19: fault vector number (e.g., 24 for General Exception)
+ * r31: contains saved predicates (pr)
+ */
+ SAVE_MIN_WITH_COVER_R19
+ alloc r14=ar.pfs,0,0,5,0
+ MOV_FROM_ISR(out1)
+ MOV_FROM_IFA(out2)
+ MOV_FROM_IIM(out3)
+ MOV_FROM_ITIR(out4)
+ ;;
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
+ // guarantee that interruption collection is on
+ mov out0=r15
+ ;;
+ SSM_PSR_I(p15, p15, r3) // restore psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ SAVE_REST
+ movl r14=ia64_leave_kernel
+ ;;
+ mov rp=r14
+ br.call.sptk.many b6=ia64_fault
+END(dispatch_to_fault_handler)
+
/*
* Squatting in this space ...
*
@@ -1686,11 +1688,10 @@ ENTRY(dispatch_illegal_op_fault)
.prologue
.body
SAVE_MIN_WITH_COVER
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
+ // guarantee that interruption collection is on
;;
-(p15) ssm psr.i // restore psr.i
+ SSM_PSR_I(p15, p15, r3) // restore psr.i
adds r3=8,r2 // set up second base pointer for SAVE_REST
;;
alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
@@ -1729,12 +1730,11 @@ END(dispatch_illegal_op_fault)
ENTRY(dispatch_to_ia32_handler)
SAVE_MIN
;;
- mov r14=cr.isr
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
+ MOV_FROM_ISR(r14)
+ SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
+ // guarantee that interruption collection is on
;;
-(p15) ssm psr.i
+ SSM_PSR_I(p15, p15, r3)
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
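
Throughout ivt.S the raw privileged instructions (mov reg=cr.*, itc.*, ssm,
rsm, rfi) are replaced by upper-case macros that carry explicit predicate and
scratch-register operands so that a paravirtualized expansion has registers
to work with; in the native build the extra operands go unused and the macros
collapse back to single instructions. Plausible native definitions,
consistent with the code they replace above (assumed, from
asm/native/inst.h):

	/* assumed native expansions */
	#define MOV_FROM_IFA(reg)	mov reg = cr.ifa
	#define MOV_TO_IFA(reg, clob)	mov cr.ifa = reg
	#define ITC_D(pred, reg, clob)	(pred) itc.d reg
	#define RSM_PSR_DT		rsm psr.dt
	#define RFI			rfi
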
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 74b6d670aae..292e214a3b8 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -2,6 +2,7 @@
#include <asm/cache.h>
#include "entry.h"
+#include "paravirt_inst.h"
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* read ar.itc in advance, and use it before leaving bank 0 */
@@ -43,16 +44,16 @@
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \
+#define IA64_NATIVE_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
- mov r29=cr.ipsr; /* M */ \
+ MOV_FROM_IPSR(p0,r29); /* M */ \
mov r26=ar.pfs; /* I */ \
- mov r28=cr.iip; /* M */ \
+ MOV_FROM_IIP(r28); /* M */ \
mov r21=ar.fpsr; /* M */ \
- COVER; /* B;; (or nothing) */ \
+ __COVER; /* B;; (or nothing) */ \
;; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
@@ -244,6 +245,6 @@
1: \
.pred.rel "mutex", pKStk, pUStk
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(COVER, mov r30=cr.ifs, , RSE_WORKAROUND)
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(COVER, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
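
Renaming the macro parameter COVER to __COVER lets callers pass the
preprocessor macro COVER as an argument without colliding with the parameter
name; the native flavor is then expected to supply definitions along these
lines (assumed):

	/* assumed flavor selection for the SAVE_MIN machinery */
	#define DO_SAVE_MIN	IA64_NATIVE_DO_SAVE_MIN
	#define COVER		cover
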
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
new file mode 100644
index 00000000000..1ae049181e8
--- /dev/null
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -0,0 +1,24 @@
+/*
+ * calculate
+ * NR_IRQS = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, FOO_NR_IRQS...)
+ * depending on config.
+ * This must be calculated before processing asm-offset.c.
+ */
+
+#define ASM_OFFSETS_C 1
+
+#include <linux/kbuild.h>
+#include <linux/threads.h>
+#include <asm-ia64/native/irq.h>
+
+void foo(void)
+{
+ union paravirt_nr_irqs_max {
+ char ia64_native_nr_irqs[IA64_NATIVE_NR_IRQS];
+#ifdef CONFIG_XEN
+ char xen_nr_irqs[XEN_NR_IRQS];
+#endif
+ };
+
+ DEFINE(NR_IRQS, sizeof (union paravirt_nr_irqs_max));
+}
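
The union computes a compile-time maximum with no preprocessor arithmetic:
sizeof a union is the size of its largest member, so giving each candidate an
array of that many chars makes sizeof(union ...) equal to max(...). A
standalone illustration:

	/* sizeof(union) == size of largest member == max of the constants */
	#define A_NR_IRQS	256
	#define B_NR_IRQS	1024

	union nr_irqs_max {
		char a[A_NR_IRQS];
		char b[B_NR_IRQS];
	};

	/* sizeof(union nr_irqs_max) == 1024 */
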
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
new file mode 100644
index 00000000000..afaf5b9a2cf
--- /dev/null
+++ b/arch/ia64/kernel/paravirt.c
@@ -0,0 +1,369 @@
+/******************************************************************************
+ * arch/ia64/kernel/paravirt.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ * Yaozu (Eddie) Dong <eddie.dong@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+
+#include <linux/compiler.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/iosapic.h>
+#include <asm/paravirt.h>
+
+/***************************************************************************
+ * general info
+ */
+struct pv_info pv_info = {
+ .kernel_rpl = 0,
+ .paravirt_enabled = 0,
+ .name = "bare hardware"
+};
+
+/***************************************************************************
+ * pv_init_ops
+ * initialization hooks.
+ */
+
+struct pv_init_ops pv_init_ops;
+
+/***************************************************************************
+ * pv_cpu_ops
+ * intrinsics hooks.
+ */
+
+/* ia64_native_xxx are macros so that we have to make them real functions */
+
+#define DEFINE_VOID_FUNC1(name) \
+ static void \
+ ia64_native_ ## name ## _func(unsigned long arg) \
+ { \
+ ia64_native_ ## name(arg); \
+ } \
+
+#define DEFINE_VOID_FUNC2(name) \
+ static void \
+ ia64_native_ ## name ## _func(unsigned long arg0, \
+ unsigned long arg1) \
+ { \
+ ia64_native_ ## name(arg0, arg1); \
+ } \
+
+#define DEFINE_FUNC0(name) \
+ static unsigned long \
+ ia64_native_ ## name ## _func(void) \
+ { \
+ return ia64_native_ ## name(); \
+ }
+
+#define DEFINE_FUNC1(name, type) \
+ static unsigned long \
+ ia64_native_ ## name ## _func(type arg) \
+ { \
+ return ia64_native_ ## name(arg); \
+ } \
+
+DEFINE_VOID_FUNC1(fc);
+DEFINE_VOID_FUNC1(intrin_local_irq_restore);
+
+DEFINE_VOID_FUNC2(ptcga);
+DEFINE_VOID_FUNC2(set_rr);
+
+DEFINE_FUNC0(get_psr_i);
+
+DEFINE_FUNC1(thash, unsigned long);
+DEFINE_FUNC1(get_cpuid, int);
+DEFINE_FUNC1(get_pmd, int);
+DEFINE_FUNC1(get_rr, unsigned long);
+
+static void
+ia64_native_ssm_i_func(void)
+{
+ ia64_native_ssm(IA64_PSR_I);
+}
+
+static void
+ia64_native_rsm_i_func(void)
+{
+ ia64_native_rsm(IA64_PSR_I);
+}
+
+static void
+ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4)
+{
+ ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4);
+}
+
+#define CASE_GET_REG(id) \
+ case _IA64_REG_ ## id: \
+ res = ia64_native_getreg(_IA64_REG_ ## id); \
+ break;
+#define CASE_GET_AR(id) CASE_GET_REG(AR_ ## id)
+#define CASE_GET_CR(id) CASE_GET_REG(CR_ ## id)
+
+unsigned long
+ia64_native_getreg_func(int regnum)
+{
+ unsigned long res = -1;
+ switch (regnum) {
+ CASE_GET_REG(GP);
+ CASE_GET_REG(IP);
+ CASE_GET_REG(PSR);
+ CASE_GET_REG(TP);
+ CASE_GET_REG(SP);
+
+ CASE_GET_AR(KR0);
+ CASE_GET_AR(KR1);
+ CASE_GET_AR(KR2);
+ CASE_GET_AR(KR3);
+ CASE_GET_AR(KR4);
+ CASE_GET_AR(KR5);
+ CASE_GET_AR(KR6);
+ CASE_GET_AR(KR7);
+ CASE_GET_AR(RSC);
+ CASE_GET_AR(BSP);
+ CASE_GET_AR(BSPSTORE);
+ CASE_GET_AR(RNAT);
+ CASE_GET_AR(FCR);
+ CASE_GET_AR(EFLAG);
+ CASE_GET_AR(CSD);
+ CASE_GET_AR(SSD);
+ CASE_GET_AR(CFLAG);
+ CASE_GET_AR(FSR);
+ CASE_GET_AR(FIR);
+ CASE_GET_AR(FDR);
+ CASE_GET_AR(CCV);
+ CASE_GET_AR(UNAT);
+ CASE_GET_AR(FPSR);
+ CASE_GET_AR(ITC);
+ CASE_GET_AR(PFS);
+ CASE_GET_AR(LC);
+ CASE_GET_AR(EC);
+
+ CASE_GET_CR(DCR);
+ CASE_GET_CR(ITM);
+ CASE_GET_CR(IVA);
+ CASE_GET_CR(PTA);
+ CASE_GET_CR(IPSR);
+ CASE_GET_CR(ISR);
+ CASE_GET_CR(IIP);
+ CASE_GET_CR(IFA);
+ CASE_GET_CR(ITIR);
+ CASE_GET_CR(IIPA);
+ CASE_GET_CR(IFS);
+ CASE_GET_CR(IIM);
+ CASE_GET_CR(IHA);
+ CASE_GET_CR(LID);
+ CASE_GET_CR(IVR);
+ CASE_GET_CR(TPR);
+ CASE_GET_CR(EOI);
+ CASE_GET_CR(IRR0);
+ CASE_GET_CR(IRR1);
+ CASE_GET_CR(IRR2);
+ CASE_GET_CR(IRR3);
+ CASE_GET_CR(ITV);
+ CASE_GET_CR(PMV);
+ CASE_GET_CR(CMCV);
+ CASE_GET_CR(LRR0);
+ CASE_GET_CR(LRR1);
+
+ default:
+ printk(KERN_CRIT "wrong_getreg %d\n", regnum);
+ break;
+ }
+ return res;
+}
+
+#define CASE_SET_REG(id) \
+ case _IA64_REG_ ## id: \
+ ia64_native_setreg(_IA64_REG_ ## id, val); \
+ break;
+#define CASE_SET_AR(id) CASE_SET_REG(AR_ ## id)
+#define CASE_SET_CR(id) CASE_SET_REG(CR_ ## id)
+
+void
+ia64_native_setreg_func(int regnum, unsigned long val)
+{
+ switch (regnum) {
+ case _IA64_REG_PSR_L:
+ ia64_native_setreg(_IA64_REG_PSR_L, val);
+ ia64_dv_serialize_data();
+ break;
+ CASE_SET_REG(SP);
+ CASE_SET_REG(GP);
+
+ CASE_SET_AR(KR0);
+ CASE_SET_AR(KR1);
+ CASE_SET_AR(KR2);
+ CASE_SET_AR(KR3);
+ CASE_SET_AR(KR4);
+ CASE_SET_AR(KR5);
+ CASE_SET_AR(KR6);
+ CASE_SET_AR(KR7);
+ CASE_SET_AR(RSC);
+ CASE_SET_AR(BSP);
+ CASE_SET_AR(BSPSTORE);
+ CASE_SET_AR(RNAT);
+ CASE_SET_AR(FCR);
+ CASE_SET_AR(EFLAG);
+ CASE_SET_AR(CSD);
+ CASE_SET_AR(SSD);
+ CASE_SET_AR(CFLAG);
+ CASE_SET_AR(FSR);
+ CASE_SET_AR(FIR);
+ CASE_SET_AR(FDR);
+ CASE_SET_AR(CCV);
+ CASE_SET_AR(UNAT);
+ CASE_SET_AR(FPSR);
+ CASE_SET_AR(ITC);
+ CASE_SET_AR(PFS);
+ CASE_SET_AR(LC);
+ CASE_SET_AR(EC);
+
+ CASE_SET_CR(DCR);
+ CASE_SET_CR(ITM);
+ CASE_SET_CR(IVA);
+ CASE_SET_CR(PTA);
+ CASE_SET_CR(IPSR);
+ CASE_SET_CR(ISR);
+ CASE_SET_CR(IIP);
+ CASE_SET_CR(IFA);
+ CASE_SET_CR(ITIR);
+ CASE_SET_CR(IIPA);
+ CASE_SET_CR(IFS);
+ CASE_SET_CR(IIM);
+ CASE_SET_CR(IHA);
+ CASE_SET_CR(LID);
+ CASE_SET_CR(IVR);
+ CASE_SET_CR(TPR);
+ CASE_SET_CR(EOI);
+ CASE_SET_CR(IRR0);
+ CASE_SET_CR(IRR1);
+ CASE_SET_CR(IRR2);
+ CASE_SET_CR(IRR3);
+ CASE_SET_CR(ITV);
+ CASE_SET_CR(PMV);
+ CASE_SET_CR(CMCV);
+ CASE_SET_CR(LRR0);
+ CASE_SET_CR(LRR1);
+ default:
+ printk(KERN_CRIT "wrong setreg %d\n", regnum);
+ break;
+ }
+}
+
+struct pv_cpu_ops pv_cpu_ops = {
+ .fc = ia64_native_fc_func,
+ .thash = ia64_native_thash_func,
+ .get_cpuid = ia64_native_get_cpuid_func,
+ .get_pmd = ia64_native_get_pmd_func,
+ .ptcga = ia64_native_ptcga_func,
+ .get_rr = ia64_native_get_rr_func,
+ .set_rr = ia64_native_set_rr_func,
+ .set_rr0_to_rr4 = ia64_native_set_rr0_to_rr4_func,
+ .ssm_i = ia64_native_ssm_i_func,
+ .getreg = ia64_native_getreg_func,
+ .setreg = ia64_native_setreg_func,
+ .rsm_i = ia64_native_rsm_i_func,
+ .get_psr_i = ia64_native_get_psr_i_func,
+ .intrin_local_irq_restore
+ = ia64_native_intrin_local_irq_restore_func,
+};
+EXPORT_SYMBOL(pv_cpu_ops);
+
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+
+void
+paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
+{
+ extern unsigned long paravirt_switch_to_targ;
+ extern unsigned long paravirt_leave_syscall_targ;
+ extern unsigned long paravirt_work_processed_syscall_targ;
+ extern unsigned long paravirt_leave_kernel_targ;
+
+ paravirt_switch_to_targ = cpu_asm_switch->switch_to;
+ paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
+ paravirt_work_processed_syscall_targ =
+ cpu_asm_switch->work_processed_syscall;
+ paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
+}
+
+/***************************************************************************
+ * pv_iosapic_ops
+ * iosapic read/write hooks.
+ */
+
+static unsigned int
+ia64_native_iosapic_read(char __iomem *iosapic, unsigned int reg)
+{
+ return __ia64_native_iosapic_read(iosapic, reg);
+}
+
+static void
+ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+{
+ __ia64_native_iosapic_write(iosapic, reg, val);
+}
+
+struct pv_iosapic_ops pv_iosapic_ops = {
+ .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
+ .get_irq_chip = ia64_native_iosapic_get_irq_chip,
+
+ .__read = ia64_native_iosapic_read,
+ .__write = ia64_native_iosapic_write,
+};
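The double-underscore names mirror the raw accessors: callers are expected to
go through iosapic_read()/iosapic_write() wrappers that dispatch via this
structure. A sketch of the wrapper shape this implies (the wrappers themselves
live in a header elsewhere in this series, so their exact form here is an
assumption):

	static inline unsigned int iosapic_read(char __iomem *iosapic,
						unsigned int reg)
	{
		return pv_iosapic_ops.__read(iosapic, reg);
	}

	static inline void iosapic_write(char __iomem *iosapic,
					 unsigned int reg, u32 val)
	{
		pv_iosapic_ops.__write(iosapic, reg, val);
	}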
+
+/***************************************************************************
+ * pv_irq_ops
+ * irq operations
+ */
+
+struct pv_irq_ops pv_irq_ops = {
+ .register_ipi = ia64_native_register_ipi,
+
+ .assign_irq_vector = ia64_native_assign_irq_vector,
+ .free_irq_vector = ia64_native_free_irq_vector,
+ .register_percpu_irq = ia64_native_register_percpu_irq,
+
+ .resend_irq = ia64_native_resend_irq,
+};
+
+/***************************************************************************
+ * pv_time_ops
+ * time operations
+ */
+
+static int
+ia64_native_do_steal_accounting(unsigned long *new_itm)
+{
+ return 0;
+}
+
+struct pv_time_ops pv_time_ops = {
+ .do_steal_accounting = ia64_native_do_steal_accounting,
+};
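Natively there is no stolen time, so do_steal_accounting() returns 0 and
timer_interrupt() (see the time.c hunk below) performs normal process-time
accounting. A guest whose vCPU can be descheduled would return nonzero after
folding the stolen interval into new_itm; a hedged sketch (stolen_cycles and
its bookkeeping are hypothetical):

	static unsigned long stolen_cycles;	/* maintained by guest code */

	static int my_do_steal_accounting(unsigned long *new_itm)
	{
		*new_itm += stolen_cycles;
		stolen_cycles = 0;
		return 1;	/* nonzero: skip process-time accounting */
	}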
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
new file mode 100644
index 00000000000..5cad6fb2ed1
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * linux/arch/ia64/kernel/paravirt_inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN
+#include <asm/xen/inst.h>
+#include <asm/xen/minstate.h>
+#else
+#include <asm/native/inst.h>
+#endif
+
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
new file mode 100644
index 00000000000..2f42fcb9776
--- /dev/null
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * linux/arch/ia64/kernel/paravirtentry.S
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include "entry.h"
+
+#define DATA8(sym, init_value) \
+ .pushsection .data.read_mostly ; \
+ .align 8 ; \
+ .global sym ; \
+ sym: ; \
+ data8 init_value ; \
+ .popsection
+
+#define BRANCH(targ, reg, breg) \
+ movl reg=targ ; \
+ ;; \
+ ld8 reg=[reg] ; \
+ ;; \
+ mov breg=reg ; \
+ br.cond.sptk.many breg
+
+#define BRANCH_PROC(sym, reg, breg) \
+ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
+ GLOBAL_ENTRY(paravirt_ ## sym) ; \
+ BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
+ END(paravirt_ ## sym)
+
+#define BRANCH_PROC_UNWINFO(sym, reg, breg) \
+ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
+ GLOBAL_ENTRY(paravirt_ ## sym) ; \
+ PT_REGS_UNWIND_INFO(0) ; \
+ BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
+ END(paravirt_ ## sym)
+
+
+BRANCH_PROC(switch_to, r22, b7)
+BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
+BRANCH_PROC(work_processed_syscall, r2, b7)
+BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
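Each BRANCH_PROC(sym, ...) therefore produces an 8-byte paravirt_<sym>_targ
slot, initialized to the native entry point, plus a paravirt_<sym> trampoline
that loads the slot and branches through it; paravirt_cpu_asm_init() in
paravirt.c rewrites the slots at boot. A C analogue of the same indirection,
purely to illustrate the shape (none of these names are in the patch):

	typedef void (*entry_fn)(void);

	static void native_switch_to_c(void) { /* stand-in for real code */ }

	/* DATA8: the patchable target word */
	static entry_fn switch_to_targ = native_switch_to_c;

	/* BRANCH: one load plus an indirect branch, as in ld8/mov b7/br */
	static void paravirt_switch_to_c(void)
	{
		switch_to_targ();
	}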
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 632cda8f2e7..e5c2de9b29a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -51,6 +51,7 @@
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
+#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -341,6 +342,8 @@ reserve_memory (void)
rsvd_region[n].end = (unsigned long) ia64_imva(_end);
n++;
+ n += paravirt_reserve_memory(&rsvd_region[n]);
+
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
@@ -519,6 +522,8 @@ setup_arch (char **cmdline_p)
{
unw_init();
+ paravirt_arch_setup_early();
+
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
*cmdline_p = __va(ia64_boot_param->command_line);
@@ -583,6 +588,9 @@ setup_arch (char **cmdline_p)
acpi_boot_init();
#endif
+ paravirt_banner();
+ paravirt_arch_setup_console(cmdline_p);
+
#ifdef CONFIG_VT
if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
@@ -602,6 +610,8 @@ setup_arch (char **cmdline_p)
#endif
/* enable IA-64 Machine Check Abort Handling unless disabled */
+ if (paravirt_arch_setup_nomca())
+ nomca = 1;
if (!nomca)
ia64_mca_init();
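paravirt_arch_setup_nomca() lets a paravirtualized platform force MCA handling
off, since a hypervisor typically owns machine-check handling. The native
default, presumably provided by asm/paravirt.h in this series, would be a
no-op along these lines (a sketch, not the actual definition):

	static inline int paravirt_arch_setup_nomca(void)
	{
		return 0;	/* keep ia64_mca_init() enabled */
	}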
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 9d1d429c6c5..03f1a9908af 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,6 +50,7 @@
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
+#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -642,6 +643,7 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callin_map);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+ paravirt_post_smp_prepare_boot_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aad1b7b1fff..65c10a42c88 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -24,6 +24,7 @@
#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
+#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
@@ -48,6 +49,15 @@ EXPORT_SYMBOL(last_cli_ip);
#endif
+#ifdef CONFIG_PARAVIRT
+static void
+paravirt_clocksource_resume(void)
+{
+ if (pv_time_ops.clocksource_resume)
+ pv_time_ops.clocksource_resume();
+}
+#endif
+
static struct clocksource clocksource_itc = {
.name = "itc",
.rating = 350,
@@ -56,6 +66,9 @@ static struct clocksource clocksource_itc = {
.mult = 0, /*to be calculated*/
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+#ifdef CONFIG_PARAVIRT
+ .resume = paravirt_clocksource_resume,
+#endif
};
static struct clocksource *itc_clocksource;
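The resume hook is compiled in only under CONFIG_PARAVIRT and stays a no-op
until a guest installs pv_time_ops.clocksource_resume. A hypothetical guest
would wire it up during its early setup, e.g.:

	static void my_guest_clocksource_resume(void)
	{
		/* resynchronize guest time with the hypervisor */
	}

	/* during guest early init: */
	pv_time_ops.clocksource_resume = my_guest_clocksource_resume;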
@@ -157,6 +170,9 @@ timer_interrupt (int irq, void *dev_id)
profile_tick(CPU_PROFILING);
+ if (paravirt_do_steal_accounting(&new_itm))
+ goto skip_process_time_accounting;
+
while (1) {
update_process_times(user_mode(get_irq_regs()));
@@ -186,6 +202,8 @@ timer_interrupt (int irq, void *dev_id)
local_irq_disable();
}
+skip_process_time_accounting:
+
do {
/*
* If we're too close to the next clock tick for
@@ -335,6 +353,11 @@ ia64_init_itm (void)
*/
clocksource_itc.rating = 50;
+ paravirt_init_missing_ticks_accounting(smp_processor_id());
+
+ /* Avoid a softlockup message when a CPU is unplugged and plugged again. */
+ touch_softlockup_watchdog();
+
/* Setup the CPU local timer tick */
ia64_cpu_local_tick();
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 5929ab10a28..5a77206c249 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -4,7 +4,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
-#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
#include <asm-generic/vmlinux.lds.h>
#define IVT_TEXT \