Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/kvm_host.h |  1 +
-rw-r--r--  include/asm-powerpc/kvm_ppc.h  |  5 +++++
-rw-r--r--  include/asm-x86/kvm_host.h     | 10 +++++++++-
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
index 04ffbb8e0a3..81a69d71101 100644
--- a/include/asm-powerpc/kvm_host.h
+++ b/include/asm-powerpc/kvm_host.h
@@ -59,6 +59,7 @@ struct kvm_vcpu_stat {
 	u32 emulated_inst_exits;
 	u32 dec_exits;
 	u32 ext_intr_exits;
+	u32 halt_wakeup;
 };
 
 struct tlbe {
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
index 7ac820308a7..b35a7e3ef97 100644
--- a/include/asm-powerpc/kvm_ppc.h
+++ b/include/asm-powerpc/kvm_ppc.h
@@ -77,12 +77,17 @@ static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
 	clear_bit(priority, &vcpu->arch.pending_exceptions);
 }
 
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
 	if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
 		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
 	vcpu->arch.msr = new_msr;
+
+	if (vcpu->arch.msr & MSR_WE)
+		kvm_vcpu_block(vcpu);
 }
 
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 9d963cd6533..1d8cd01fa51 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -314,6 +314,9 @@ struct kvm_arch{
 	struct page *apic_access_page;
 
 	gpa_t wall_clock;
+
+	struct page *ept_identity_pagetable;
+	bool ept_identity_pagetable_done;
 };
 
 struct kvm_vm_stat {
@@ -422,6 +425,7 @@ struct kvm_x86_ops {
 			       struct kvm_run *run);
 
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+	int (*get_tdp_level)(void);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -433,6 +437,9 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
+void kvm_mmu_set_base_ptes(u64 base_pte);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
@@ -620,7 +627,7 @@ static inline void fx_restore(struct i387_fxsave_struct *image)
 	asm("fxrstor (%0)":: "r" (image));
 }
 
-static inline void fpu_init(void)
+static inline void fx_finit(void)
 {
 	asm("finit");
 }
@@ -644,6 +651,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
 #define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
 #define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVEPT		  ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
 #define ASM_VMX_INVVPID		  ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
 
 #define MSR_IA32_TIME_STAMP_COUNTER		0x010
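For context on the PowerPC change: when the guest sets MSR[WE] (wait enable), the new tail of kvmppc_set_msr() parks the vcpu via kvm_vcpu_block() until an interrupt arrives, and the new halt_wakeup counter accounts the resulting wakeups. A minimal caller sketch, not part of this diff — the function name and register fetch are illustrative assumptions; only kvmppc_set_msr() and kvm_vcpu_block() come from the patch:

	/* Hypothetical mtmsr emulation path (sketch only). */
	static void emulate_mtmsr(struct kvm_vcpu *vcpu, int rs)
	{
		/* A "full" MSR write: may flip the privilege level
		 * (MSR_PR) and, with this patch, block the vcpu when
		 * the guest enters the MSR_WE wait state. */
		kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
	}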
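On the x86 side, the new kvm_mmu_set_base_ptes()/kvm_mmu_set_mask_ptes() hooks let each backend tell the MMU which bits to stamp into shadow entries, since EPT entries do not use the ordinary x86 PTE layout. A hedged sketch of what the shadow-paging defaults would look like at arch init time (the call site and exact masks are assumptions, not shown in this diff):

	/* Shadow paging: reuse the ordinary x86 PTE bits (sketch). */
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0ull);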
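Finally, the hand-assembled ASM_VMX_INVEPT opcode (encoded as invept rcx, [rax]) would presumably be wrapped the same way the existing ASM_VMX_INVVPID one is in vmx.c: the invalidation descriptor sits in memory referenced by rax and the invalidation type goes in rcx. A sketch mirroring that pattern — the wrapper itself is not part of this diff and its name is an assumption:

	/* Issue INVEPT; modeled on the existing __invvpid() wrapper. */
	static inline void __invept(int ext, u64 eptp, gpa_t gpa)
	{
		struct {
			u64 eptp, gpa;
		} operand = {eptp, gpa};

		asm volatile (ASM_VMX_INVEPT
				/* CF==1 or ZF==1 --> invalidation failed */
				"; ja 1f ; ud2 ; 1:\n"
				: : "a" (&operand), "c" (ext) : "cc", "memory");
	}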