about summary refs log tree commit diff
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-10-11 15:12:24 +0200
committerAvi Kivity <avi@qumranet.com>2008-01-30 17:52:52 +0200
commitcc70e7374df1e3a56d718e8ca330619f316511a6 (patch)
tree6746e9724eb51486104800be264e6baa64c8fefd
parentc22e3514fceb2f514093ce1d19a2f660ac7347ae (diff)
KVM: MMU: Disable write access on clean large pages
By forcing clean huge pages to be read-only, we have separate roles for the shadow of a clean large page and the shadow of a dirty large page. This is necessary because different ptes will be instantiated for the two cases, even for read faults. Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--drivers/kvm/paging_tmpl.h2
1 file changed, 2 insertions, 0 deletions
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index e07cb2e006c..4538b1533d5 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -382,6 +382,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
metaphysical = 1;
hugepage_access = walker->pte;
hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+ if (!is_dirty_pte(walker->pte))
+ hugepage_access &= ~PT_WRITABLE_MASK;
hugepage_access >>= PT_WRITABLE_SHIFT;
if (walker->pte & PT64_NX_MASK)
hugepage_access |= (1 << 2);