author	Florian Funke <ffunke@de.ibm.com>	2008-10-10 21:33:26 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-10-10 21:34:00 +0200
commit	15e86b0c752d50e910b2cca6e83ce74c4440d06c (patch)
tree	c17e8e71362bb9432e215a01bbd671c76216d262 /arch/s390/include/asm/pgtable.h
parent	ab1d848fd6a9151b02c6cbf4bddce6e24707b094 (diff)
[S390] introduce dirty bit for kvm live migration
This patch defines a dirty bit in the PGSTE that can be used to implement dirty page logging for KVM's live migration. The bit is set in the ptep_rcp_copy function, which is called to save dirty and referenced information from the storage key into the PGSTE. The bit can be tested and reset by KVM using the kvm_s390_test_and_clear_page_dirty function that is introduced by this patch.

Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Florian Funke <ffunke@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
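As a rough illustration of how this interface could be consumed, the sketch below resolves one user address down to its pte and asks whether the backing page was dirtied since the last check. Only kvm_s390_test_and_clear_page_dirty() comes from this patch; the surrounding walk uses generic mm helpers, the function name example_page_was_dirtied() is made up for illustration, and the caller is assumed to hold the locks required for a pte_offset_map() walk.

static int example_page_was_dirtied(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	int dirty;

	/* Resolve the user address down to its pte. */
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return 0;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	ptep = pte_offset_map(pmd, addr);

	/* Harvest and reset the user dirty bit kept in the PGSTE. */
	dirty = kvm_s390_test_and_clear_page_dirty(mm, ptep);

	pte_unmap(ptep);
	return dirty;	/* 1 = dirtied, 0 = clean, -EINVAL = mm has no pgstes */
}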
Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--	arch/s390/include/asm/pgtable.h	45
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0bdb704ae05..1a928f84afd 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -281,6 +281,9 @@ extern char empty_zero_page[PAGE_SIZE];
 #define RCP_GR_BIT	50
 #define RCP_GC_BIT	49
 
+/* User dirty bit for KVM's migration feature */
+#define KVM_UD_BIT	47
+
 #ifndef __s390x__
 
 /* Bits in the segment table address-space-control-element */
@@ -575,12 +578,16 @@ static inline void ptep_rcp_copy(pte_t *ptep)
 	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 
 	skey = page_get_storage_key(page_to_phys(page));
-	if (skey & _PAGE_CHANGED)
+	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
 	if (skey & _PAGE_REFERENCED)
 		set_bit_simple(RCP_GR_BIT, pgste);
-	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
 		SetPageDirty(page);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
 	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
 		SetPageReferenced(page);
 #endif
@@ -744,6 +751,40 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte;
 }
 
+#ifdef CONFIG_PGSTE
+/*
+ * Get (and clear) the user dirty bit for a PTE.
+ */
+static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
+						     pte_t *ptep)
+{
+	int dirty;
+	unsigned long *pgste;
+	struct page *page;
+	unsigned int skey;
+
+	if (!mm->context.pgstes)
+		return -EINVAL;
+	rcp_lock(ptep);
+	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+	page = virt_to_page(pte_val(*ptep));
+	skey = page_get_storage_key(page_to_phys(page));
+	if (skey & _PAGE_CHANGED) {
+		set_bit_simple(RCP_GC_BIT, pgste);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
+	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
+		SetPageDirty(page);
+		set_bit_simple(KVM_UD_BIT, pgste);
+	}
+	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
+	if (skey & _PAGE_CHANGED)
+		page_clear_dirty(page);
+	rcp_unlock(ptep);
+	return dirty;
+}
+#endif
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
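As the new helper reads, a modification can be recorded in two places: in the hardware storage key of the physical page (_PAGE_CHANGED) and in the host change bit (RCP_HC_BIT) that an earlier ptep_rcp_copy() already moved into the PGSTE. Both sources are folded into KVM_UD_BIT before that bit is tested and cleared, so no modification is missed between harvesting passes. The storage key's change bit is reset with page_clear_dirty() only when it was found set, while SetPageDirty() preserves the same information for Linux's normal writeback tracking.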