Diffstat (limited to 'include/asm-s390')
-rw-r--r--  include/asm-s390/page.h    | 22
-rw-r--r--  include/asm-s390/pgalloc.h |  3
-rw-r--r--  include/asm-s390/pgtable.h | 16
3 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 363ea761d5e..05ea6f17278 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -127,6 +127,26 @@ page_get_storage_key(unsigned long addr)
return skey;
}
+extern unsigned long max_pfn;
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	unsigned long dummy;
+	int ccode;
+
+	if (pfn >= max_pfn)
+		return 0;
+
+	asm volatile(
+		" lra %0,0(%2)\n"
+		" ipm %1\n"
+		" srl %1,28\n"
+		: "=d" (dummy), "=d" (ccode)
+		: "a" (pfn << PAGE_SHIFT)
+		: "cc");
+	return !ccode;
+}
+
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
@@ -138,8 +158,6 @@ page_get_storage_key(unsigned long addr)
#define __va(x) (void *)(unsigned long)(x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
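
The new pfn_valid() does more than the old max_mapnr bound check it replaces: lra ("load real address") sets a non-zero condition code when no real storage backs the address, ipm copies the condition code into the register, and srl shifts it into the low bits, so pfns inside memory holes or detached storage are reported invalid even below max_pfn. A minimal usage sketch, not part of the patch (count_present_pages() is an invented name; pfn_valid() is the helper added above):

#include <asm/page.h>

/* Illustration only: walk a pfn range and count the pages that are
 * really backed by storage, skipping holes via the lra-based probe. */
static unsigned long count_present_pages(unsigned long start_pfn,
					 unsigned long end_pfn)
{
	unsigned long pfn, present = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			present++;
	return present;
}
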
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 28619de5eca..0707a7e2fc1 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -25,8 +25,11 @@ extern void diag10(unsigned long addr);
* Page allocation orders.
*/
#ifndef __s390x__
+# define PTE_ALLOC_ORDER 0
+# define PMD_ALLOC_ORDER 0
# define PGD_ALLOC_ORDER 1
#else /* __s390x__ */
+# define PTE_ALLOC_ORDER 0
# define PMD_ALLOC_ORDER 2
# define PGD_ALLOC_ORDER 2
#endif /* __s390x__ */
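
The new PTE_ALLOC_ORDER and PMD_ALLOC_ORDER constants make the allocation order of every page-table level explicit (an order-N allocation is 2^N contiguous pages, so order 0 is a single page). A hedged sketch of how such a constant is typically consumed, assuming the standard __get_free_pages()/free_pages() allocators; the helper names are invented for illustration:

#include <linux/gfp.h>
#include <asm/pgalloc.h>

/* Illustration only: back one pte table with 2^PTE_ALLOC_ORDER pages. */
static unsigned long alloc_pte_table(void)
{
	return __get_free_pages(GFP_KERNEL, PTE_ALLOC_ORDER);
}

static void free_pte_table(unsigned long table)
{
	free_pages(table, PTE_ALLOC_ORDER);
}
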
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 2d968a69ed1..ae61aca5d48 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -107,23 +107,25 @@ extern char empty_zero_page[PAGE_SIZE];
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
+extern unsigned long vmalloc_end;
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \
& ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END vmalloc_end
/*
* We need some free virtual space to be able to do vmalloc.
* VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
* area. On a machine with 2GB memory we make sure that we
* have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 1GB.
+ * with 4TB we make sure we have at least 128GB.
*/
#ifndef __s390x__
#define VMALLOC_MIN_SIZE 0x8000000UL
-#define VMALLOC_END 0x80000000UL
+#define VMALLOC_END_INIT 0x80000000UL
#else /* __s390x__ */
-#define VMALLOC_MIN_SIZE 0x40000000UL
-#define VMALLOC_END 0x40000000000UL
+#define VMALLOC_MIN_SIZE 0x2000000000UL
+#define VMALLOC_END_INIT 0x40000000000UL
#endif /* __s390x__ */
/*
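
Turning VMALLOC_END into the variable vmalloc_end lets the vmalloc area shrink at run time, for example when shared memory segments are mapped above normal memory; VMALLOC_END_INIT is only the boot-time ceiling. A speculative sketch of the boot-time clamp implied by the comment above, assuming an invented reserve_vmalloc_area() helper and a memory_end value produced by memory detection:

#include <linux/init.h>
#include <asm/pgtable.h>

unsigned long vmalloc_end;	/* the real definition lives in arch setup code */

/* Illustration only: start at the compile-time ceiling and keep at
 * least VMALLOC_MIN_SIZE of address space free for vmalloc. */
static void __init reserve_vmalloc_area(unsigned long *memory_end)
{
	vmalloc_end = VMALLOC_END_INIT;
	if (*memory_end > vmalloc_end - VMALLOC_MIN_SIZE)
		*memory_end = vmalloc_end - VMALLOC_MIN_SIZE;
}
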
@@ -815,11 +817,17 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
+extern int add_shared_memory(unsigned long start, unsigned long size);
+extern int remove_shared_memory(unsigned long start, unsigned long size);
+
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
+#define __HAVE_ARCH_MEMMAP_INIT
+extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
+
#define __HAVE_ARCH_PTEP_ESTABLISH
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
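
Defining __HAVE_ARCH_MEMMAP_INIT tells the generic zone setup code that the architecture supplies its own memmap_init(), which together with add_shared_memory()/remove_shared_memory() is what lets struct pages be created only for storage that is actually present. A simplified sketch of the usual shape of such an override, not the actual s390 implementation, assuming the four-argument memmap_init_zone() helper of this kernel generation:

#include <linux/mm.h>
#include <linux/init.h>
#include <asm/pgtable.h>

/* Illustration only: initialise struct pages just for pfns that
 * pfn_valid() reports as present, handing each contiguous run to the
 * generic memmap_init_zone() helper. */
void __init memmap_init(unsigned long size, int nid, unsigned long zone,
			unsigned long start_pfn)
{
	unsigned long pfn = start_pfn;
	unsigned long end = start_pfn + size;

	while (pfn < end) {
		unsigned long run_start;

		while (pfn < end && !pfn_valid(pfn))	/* skip holes */
			pfn++;
		run_start = pfn;
		while (pfn < end && pfn_valid(pfn))	/* collect a run */
			pfn++;
		if (pfn > run_start)
			memmap_init_zone(pfn - run_start, nid, zone, run_start);
	}
}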