From 855149aaa90016c576a0e684361a34f8047307d0 Mon Sep 17 00:00:00 2001
From: Izik Eidus
Date: Thu, 20 Mar 2008 18:17:24 +0200
Subject: KVM: MMU: fix dirty bit setting when removing write permissions

When mmu_set_spte() checks whether the page referenced by a spte should be
released as dirty or clean, it checks whether the shadow pte was writable.
However, if rmap_write_protect() has been called, shadow ptes that were
writable may have become read-only, and mmu_set_spte() will then release
the pages as clean.

This patch fixes the issue by marking the page as dirty inside
rmap_write_protect().

Signed-off-by: Izik Eidus
Signed-off-by: Avi Kivity
---
 arch/x86/kvm/mmu.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch/x86/kvm/mmu.c')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a5872b3c466..dd4b95b3896 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -626,6 +626,14 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
+	if (write_protected) {
+		struct page *page;
+
+		spte = rmap_next(kvm, rmapp, NULL);
+		page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+		SetPageDirty(page);
+	}
+
 	/* check for huge page mappings */
 	rmapp = gfn_to_rmap(kvm, gfn, 1);
 	spte = rmap_next(kvm, rmapp, NULL);
--
cgit v1.2.3
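
Editor's note: to make the ordering problem in the commit message concrete, below is a
minimal user-space C sketch (illustrative only, not the KVM code). The names
write_protect(), release(), fake_page and SPTE_WRITABLE are made-up stand-ins for the
shadow-pte writable bit, rmap_write_protect() and the release-as-dirty/clean decision in
mmu_set_spte(); the "fixed" flag mirrors the SetPageDirty() call the patch adds.

/*
 * Standalone sketch (not kernel code): deciding "dirty vs. clean" from the
 * spte's writable bit at release time loses the dirty state once write
 * protection has already cleared that bit; recording the dirty state at
 * write-protect time (as the patch does) preserves it.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SPTE_WRITABLE (1ULL << 1)       /* stand-in for the writable bit */

struct fake_page {
	bool dirty;                     /* stand-in for the struct page dirty flag */
};

/* Write-protect the spte.  With 'fixed' set, mark the page dirty here,
 * mirroring the SetPageDirty() added to rmap_write_protect() by the patch. */
static void write_protect(uint64_t *spte, struct fake_page *page, bool fixed)
{
	if (*spte & SPTE_WRITABLE) {
		if (fixed)
			page->dirty = true;
		*spte &= ~SPTE_WRITABLE;
	}
}

/* Release path: the pre-patch logic only looks at the (possibly already
 * write-protected) spte, so a page the guest wrote to can be released clean. */
static void release(uint64_t spte, struct fake_page *page)
{
	if (spte & SPTE_WRITABLE)
		page->dirty = true;
	printf("released %s\n", page->dirty ? "dirty" : "clean");
}

int main(void)
{
	for (int fixed = 0; fixed <= 1; fixed++) {
		uint64_t spte = SPTE_WRITABLE;      /* guest had a writable mapping */
		struct fake_page page = { .dirty = false };

		write_protect(&spte, &page, fixed); /* protection cleared before release */
		release(spte, &page);               /* old check now sees a read-only spte */
	}
	/* prints "released clean" (the bug), then "released dirty" (with the fix) */
	return 0;
}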