Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/Kconfig        |   1
-rw-r--r--  arch/x86/kvm/mmu.c          | 111
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  |  14
-rw-r--r--  arch/x86/kvm/svm.c          |  22
-rw-r--r--  arch/x86/kvm/vmx.c          |  21
-rw-r--r--  arch/x86/kvm/vmx.h          |   2
-rw-r--r--  arch/x86/kvm/x86.c          | 133
7 files changed, 216 insertions, 88 deletions
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8d45fabc5f3..ce3251ce550 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
select PREEMPT_NOTIFIERS
+ select MMU_NOTIFIER
select ANON_INODES
---help---
Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b0e4ddca6c1..3da2508eb22 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -653,6 +653,88 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
account_shadowed(kvm, gfn);
}
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int need_tlb_flush = 0;
+
+ while ((spte = rmap_next(kvm, rmapp, NULL))) {
+ BUG_ON(!(*spte & PT_PRESENT_MASK));
+ rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
+ rmap_remove(kvm, spte);
+ set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+ need_tlb_flush = 1;
+ }
+ return need_tlb_flush;
+}
+
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+ int (*handler)(struct kvm *kvm, unsigned long *rmapp))
+{
+ int i;
+ int retval = 0;
+
+ /*
+ * If mmap_sem isn't taken, we can walk the memslots with only
+ * the mmu_lock by skipping over the slots with userspace_addr == 0.
+ */
+ for (i = 0; i < kvm->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ unsigned long start = memslot->userspace_addr;
+ unsigned long end;
+
+ /* mmu_lock protects userspace_addr */
+ if (!start)
+ continue;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+ if (hva >= start && hva < end) {
+ gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+ retval |= handler(kvm, &memslot->rmap[gfn_offset]);
+ retval |= handler(kvm,
+ &memslot->lpage_info[
+ gfn_offset /
+ KVM_PAGES_PER_HPAGE].rmap_pde);
+ }
+ }
+
+ return retval;
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+}
+
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
+{
+ u64 *spte;
+ int young = 0;
+
+ /* always return old for EPT */
+ if (!shadow_accessed_mask)
+ return 0;
+
+ spte = rmap_next(kvm, rmapp, NULL);
+ while (spte) {
+ int _young;
+ u64 _spte = *spte;
+ BUG_ON(!(_spte & PT_PRESENT_MASK));
+ _young = _spte & PT_ACCESSED_MASK;
+ if (_young) {
+ young = 1;
+ clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ }
+ spte = rmap_next(kvm, rmapp, spte);
+ }
+ return young;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+ return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+}
+
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
@@ -1203,6 +1285,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
int r;
int largepage = 0;
pfn_t pfn;
+ unsigned long mmu_seq;
down_read(&current->mm->mmap_sem);
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
@@ -1210,6 +1293,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
largepage = 1;
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
@@ -1220,6 +1305,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
PT32E_ROOT_LEVEL);
@@ -1227,6 +1314,11 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
@@ -1345,6 +1437,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
int r;
int largepage = 0;
gfn_t gfn = gpa >> PAGE_SHIFT;
+ unsigned long mmu_seq;
ASSERT(vcpu);
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1358,6 +1451,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
largepage = 1;
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
if (is_error_pfn(pfn)) {
@@ -1365,12 +1460,19 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
return 1;
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static void nonpaging_free(struct kvm_vcpu *vcpu)
@@ -1670,6 +1772,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
vcpu->arch.update_pte.largepage = 1;
}
+ vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, gfn);
up_read(&current->mm->mmap_sem);
@@ -1814,6 +1918,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
@@ -1870,6 +1975,12 @@ void kvm_enable_tdp(void)
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);
+void kvm_disable_tdp(void)
+{
+ tdp_enabled = false;
+}
+EXPORT_SYMBOL_GPL(kvm_disable_tdp);
+
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4d918220bae..4a814bff21f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -263,6 +263,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
pfn = vcpu->arch.update_pte.pfn;
if (is_error_pfn(pfn))
return;
+ if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
+ return;
kvm_get_pfn(pfn);
mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
@@ -343,7 +345,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
shadow_addr = __pa(shadow_page->spt);
shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
| PT_WRITABLE_MASK | PT_USER_MASK;
- *shadow_ent = shadow_pte;
+ set_shadow_pte(shadow_ent, shadow_pte);
}
mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
@@ -380,6 +382,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
int r;
pfn_t pfn;
int largepage = 0;
+ unsigned long mmu_seq;
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
kvm_mmu_audit(vcpu, "pre page fault");
@@ -413,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
largepage = 1;
}
}
+ mmu_seq = vcpu->kvm->mmu_notifier_seq;
+ /* implicit mb(), we'll read before PT lock is unlocked */
pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
up_read(&current->mm->mmap_sem);
@@ -424,6 +429,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
}
spin_lock(&vcpu->kvm->mmu_lock);
+ if (mmu_notifier_retry(vcpu, mmu_seq))
+ goto out_unlock;
kvm_mmu_free_some_pages(vcpu);
shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
largepage, &write_pt, pfn);
@@ -439,6 +446,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
spin_unlock(&vcpu->kvm->mmu_lock);
return write_pt;
+
+out_unlock:
+ spin_unlock(&vcpu->kvm->mmu_lock);
+ kvm_release_pfn_clean(pfn);
+ return 0;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b756e876dce..8233b86c778 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -62,6 +62,7 @@ static int npt = 1;
module_param(npt, int, S_IRUGO);
static void kvm_reput_irq(struct vcpu_svm *svm);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
@@ -453,7 +454,8 @@ static __init int svm_hardware_setup(void)
if (npt_enabled) {
printk(KERN_INFO "kvm: Nested Paging enabled\n");
kvm_enable_tdp();
- }
+ } else
+ kvm_disable_tdp();
return 0;
@@ -877,6 +879,10 @@ set:
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+ unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
+
+ if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
+ force_new_asid(vcpu);
vcpu->arch.cr4 = cr4;
if (!npt_enabled)
@@ -1007,10 +1013,13 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
struct kvm *kvm = svm->vcpu.kvm;
u64 fault_address;
u32 error_code;
+ bool event_injection = false;
if (!irqchip_in_kernel(kvm) &&
- is_external_interrupt(exit_int_info))
+ is_external_interrupt(exit_int_info)) {
+ event_injection = true;
push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+ }
fault_address = svm->vmcb->control.exit_info_2;
error_code = svm->vmcb->control.exit_info_1;
@@ -1023,7 +1032,16 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
(u32)fault_address, (u32)(fault_address >> 32),
handler);
+ /*
+ * FIXME: This shouldn't be necessary here, but there is a flush
+ * missing in the MMU code. Until we find this bug, flush the
+ * complete TLB here on an NPF.
+ */
+ if (npt_enabled)
+ svm_flush_tlb(&svm->vcpu);
+ if (event_injection)
+ kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0cac6370171..7041cc52b56 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
cr2 = vmcs_readl(EXIT_QUALIFICATION);
KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
(u32)((u64)cr2 >> 32), handler);
+ if (vect_info & VECTORING_INFO_VALID_MASK)
+ kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code);
}
@@ -3116,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return ERR_PTR(-ENOMEM);
allocate_vpid(vmx);
- if (id == 0 && vm_need_ept()) {
- kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
- VMX_EPT_WRITABLE_MASK |
- VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
- kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
- VMX_EPT_FAKE_DIRTY_MASK, 0ull,
- VMX_EPT_EXECUTABLE_MASK);
- kvm_enable_tdp();
- }
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
@@ -3303,8 +3296,16 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
- if (cpu_has_vmx_ept())
+ if (vm_need_ept()) {
bypass_guest_pf = 0;
+ kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
+ VMX_EPT_WRITABLE_MASK |
+ VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+ kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
+ VMX_EPT_EXECUTABLE_MASK);
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
if (bypass_guest_pf)
kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 425a13436b3..23e8373507a 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -370,8 +370,6 @@ enum vmcs_field {
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
-#define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62)
-#define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63)
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f1cdb011cf..0d682fc6aeb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -883,6 +883,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
+ case KVM_CAP_SYNC_MMU:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -1495,6 +1496,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
goto out;
down_write(&kvm->slots_lock);
+ spin_lock(&kvm->mmu_lock);
p = &kvm->arch.aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1506,6 +1508,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
break;
kvm->arch.naliases = n;
+ spin_unlock(&kvm->mmu_lock);
kvm_mmu_zap_all(kvm);
up_write(&kvm->slots_lock);
@@ -3184,6 +3187,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
kvm_desct->base |= seg_desc->base2 << 24;
kvm_desct->limit = seg_desc->limit0;
kvm_desct->limit |= seg_desc->limit << 16;
+ if (seg_desc->g) {
+ kvm_desct->limit <<= 12;
+ kvm_desct->limit |= 0xfff;
+ }
kvm_desct->selector = selector;
kvm_desct->type = seg_desc->type;
kvm_desct->present = seg_desc->p;
@@ -3223,6 +3230,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
struct desc_struct *seg_desc)
{
+ gpa_t gpa;
struct descriptor_table dtable;
u16 index = selector >> 3;
@@ -3232,13 +3240,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
return 1;
}
- return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+ gpa += index * 8;
+ return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}
/* allowed just for 8 bytes segments */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
struct desc_struct *seg_desc)
{
+ gpa_t gpa;
struct descriptor_table dtable;
u16 index = selector >> 3;
@@ -3246,7 +3257,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
if (dtable.limit < index * 8 + 7)
return 1;
- return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+ gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+ gpa += index * 8;
+ return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}
static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
@@ -3258,55 +3271,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
base_addr |= (seg_desc->base1 << 16);
base_addr |= (seg_desc->base2 << 24);
- return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc,
- struct tss_segment_32 *tss)
-{
- u32 base_addr;
-
- base_addr = get_tss_base_addr(vcpu, seg_desc);
-
- return kvm_read_guest(vcpu->kvm, base_addr, tss,
- sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc,
- struct tss_segment_32 *tss)
-{
- u32 base_addr;
-
- base_addr = get_tss_base_addr(vcpu, seg_desc);
-
- return kvm_write_guest(vcpu->kvm, base_addr, tss,
- sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc,
- struct tss_segment_16 *tss)
-{
- u32 base_addr;
-
- base_addr = get_tss_base_addr(vcpu, seg_desc);
-
- return kvm_read_guest(vcpu->kvm, base_addr, tss,
- sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
- struct desc_struct *seg_desc,
- struct tss_segment_16 *tss)
-{
- u32 base_addr;
-
- base_addr = get_tss_base_addr(vcpu, seg_desc);
-
- return kvm_write_guest(vcpu->kvm, base_addr, tss,
- sizeof(struct tss_segment_16));
+ return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}
static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -3466,20 +3431,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
}
static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
- struct desc_struct *cseg_desc,
+ u32 old_tss_base,
struct desc_struct *nseg_desc)
{
struct tss_segment_16 tss_segment_16;
int ret = 0;
- if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+ if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+ sizeof tss_segment_16))
goto out;
save_state_to_tss16(vcpu, &tss_segment_16);
- save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
- if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+ if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+ sizeof tss_segment_16))
+ goto out;
+
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ &tss_segment_16, sizeof tss_segment_16))
goto out;
+
if (load_state_from_tss16(vcpu, &tss_segment_16))
goto out;
@@ -3489,20 +3460,26 @@ out:
}
static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
- struct desc_struct *cseg_desc,
+ u32 old_tss_base,
struct desc_struct *nseg_desc)
{
struct tss_segment_32 tss_segment_32;
int ret = 0;
- if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+ if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+ sizeof tss_segment_32))
goto out;
save_state_to_tss32(vcpu, &tss_segment_32);
- save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
- if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+ if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+ sizeof tss_segment_32))
goto out;
+
+ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+ &tss_segment_32, sizeof tss_segment_32))
+ goto out;
+
if (load_state_from_tss32(vcpu, &tss_segment_32))
goto out;
@@ -3517,16 +3494,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
struct desc_struct cseg_desc;
struct desc_struct nseg_desc;
int ret = 0;
+ u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+ u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
- kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+ old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+ /* FIXME: Handle errors. Failure to read either TSS or their
+ * descriptors should generate a page fault.
+ */
if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
goto out;
- if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+ if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
goto out;
-
if (reason != TASK_SWITCH_IRET) {
int cpl;
@@ -3544,8 +3525,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
cseg_desc.type &= ~(1 << 1); //clear the B flag
- save_guest_segment_descriptor(vcpu, tr_seg.selector,
- &cseg_desc);
+ save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
}
if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3537,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
kvm_x86_ops->cache_regs(vcpu);
if (nseg_desc.type & 8)
- ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+ ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
&nseg_desc);
else
- ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+ ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
&nseg_desc);
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
@@ -3995,16 +3975,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
*/
if (!user_alloc) {
if (npages && !old.rmap) {
+ unsigned long userspace_addr;
+
down_write(&current->mm->mmap_sem);
- memslot->userspace_addr = do_mmap(NULL, 0,
- npages * PAGE_SIZE,
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS,
- 0);
+ userspace_addr = do_mmap(NULL, 0,
+ npages * PAGE_SIZE,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS,
+ 0);
up_write(&current->mm->mmap_sem);
- if (IS_ERR((void *)memslot->userspace_addr))
- return PTR_ERR((void *)memslot->userspace_addr);
+ if (IS_ERR((void *)userspace_addr))
+ return PTR_ERR((void *)userspace_addr);
+
+ /* set userspace_addr atomically for kvm_hva_to_rmapp */
+ spin_lock(&kvm->mmu_lock);
+ memslot->userspace_addr = userspace_addr;
+ spin_unlock(&kvm->mmu_lock);
} else {
if (!old.user_alloc && old.rmap) {
int ret;