about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-10-11 15:30:21 +0200
committerAvi Kivity <avi@qumranet.com>2008-01-30 17:52:52 +0200
commitc4fcc2724628c6548748ec80a90b548fc300e81f (patch)
tree30bfbb4b8624636270a6921a4c8fdbcef054bff0
parent5df34a86f917024b67f9e7c850153390973cdfe3 (diff)
KVM: MMU: When updating the dirty bit, inform the mmu about it
Since the mmu uses different shadow pages for dirty large pages and clean large pages, this allows the mmu to drop ptes that are now invalid.

Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--drivers/kvm/paging_tmpl.h5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index a0f84a5379a..a9e687b5c1e 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -74,9 +74,14 @@ static void FNAME(update_dirty_bit)(struct kvm_vcpu *vcpu,
pt_element_t *ptep,
gfn_t table_gfn)
{
+ gpa_t pte_gpa;
+
if (write_fault && !is_dirty_pte(*ptep)) {
mark_page_dirty(vcpu->kvm, table_gfn);
*ptep |= PT_DIRTY_MASK;
+ pte_gpa = ((gpa_t)table_gfn << PAGE_SHIFT);
+ pte_gpa += offset_in_page(ptep);
+ kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)ptep, sizeof(*ptep));
}
}