author		Yang Zhang <yang.zhang@intel.com>	2009-03-23 03:31:04 -0400
committer	Avi Kivity <avi@redhat.com>	2009-06-10 11:48:34 +0300
commit		362c1055e58ecd25a9393c520ab263c80b147497 (patch)
tree		76fe99925f7ce1e9bf14733658ea51b4dd025703 /arch/ia64/kvm
parent		a8b876b1a469cb364fee16ba3aef01613a1231cc (diff)
KVM: ia64: enable external interrupt in vmm
Currently, the interrupt enable bit is cleared while running in the vmm. This
patch sets the bit so that external interrupts can be handled while in the
vmm, which improves I/O performance.

Signed-off-by: Yang Zhang <yang.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
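For context, the mechanism is straightforward: with psr.i now left enabled
inside the vmm, the hypercall paths that transition between guest and host
must run with local interrupts masked. Below is a minimal C sketch of that
guard, mirroring the process.c hunk in this patch; the wrapper name
handle_pal_request is hypothetical, while the other identifiers and the
local_irq_save()/local_irq_restore() pair are the ones the patch itself uses.

/* Hypothetical wrapper sketching the guard this patch adds around the
 * PAL hypercall path in kvm_ia64_handle_break(). Since psr.i stays set
 * while in the vmm, the guest<->host transition is bracketed by an
 * interrupt save/restore so an external interrupt cannot land mid-switch.
 */
static void handle_pal_request(struct kvm_vcpu *v)
{
	long psr;

	local_irq_save(psr);	/* mask local interrupts, remember old state */
	set_pal_call_data(v);	/* marshal the guest's PAL call arguments */
	vmm_transition(v);	/* switch to host context to service the call */
	get_pal_call_result(v);	/* copy the result back into the vcpu */
	vcpu_increment_iip(v);	/* advance the guest past the break */
	local_irq_restore(psr);	/* re-enable interrupts if they were enabled */
}

The SAL path in the same hunk follows the identical pattern with
set_sal_call_data()/get_sal_call_result().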
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--	arch/ia64/kvm/process.c	5
-rw-r--r--	arch/ia64/kvm/vmm_ivt.S	18
-rw-r--r--	arch/ia64/kvm/vtlb.c	3
3 files changed, 16 insertions, 10 deletions
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index b1dc80952d9..a8f84da04b4 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -652,20 +652,25 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
 		unsigned long isr, unsigned long iim)
 {
 	struct kvm_vcpu *v = current_vcpu;
+	long psr;
 
 	if (ia64_psr(regs)->cpl == 0) {
 		/* Allow hypercalls only when cpl = 0. */
 		if (iim == DOMN_PAL_REQUEST) {
+			local_irq_save(psr);
 			set_pal_call_data(v);
 			vmm_transition(v);
 			get_pal_call_result(v);
 			vcpu_increment_iip(v);
+			local_irq_restore(psr);
 			return;
 		} else if (iim == DOMN_SAL_REQUEST) {
+			local_irq_save(psr);
 			set_sal_call_data(v);
 			vmm_transition(v);
 			get_sal_call_result(v);
 			vcpu_increment_iip(v);
+			local_irq_restore(psr);
 			return;
 		}
 	}
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ef1a017a31..40920c63064 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15)ssm psr.i // restore psr.i
+ (p15)ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer
;;
KVM_SAVE_REST
@@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
;;
srlz.i // guarantee that interruption collection is on
;;
- //(p15) ssm psr.i // restore psr.i
+ (p15) ssm psr.i // restore psr.i
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
;;
KVM_SAVE_REST
@@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
;;
srlz.i
;;
- //(p15) ssm psr.i
+ (p15) ssm psr.i
addl r14=@gprel(ia64_leave_hypervisor),gp
;;
KVM_SAVE_REST
@@ -1333,7 +1333,7 @@ hostret = r24
;;
(p7) srlz.i
;;
-//(p6) ssm psr.i
+(p6) ssm psr.i
;;
mov rp=rpsave
mov ar.pfs=pfssave
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 2c2501f1315..4290a429bf7 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"(p7) st8 [%2]=r9;;"
"ssm psr.ic;;"
"srlz.d;;"
- /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
+ "ssm psr.i;;"
+ "srlz.d;;"
: "=r"(ret) : "r"(iha), "r"(pte):"memory");
return ret;