Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  34
1 file changed, 27 insertions, 7 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 06ad914928c..8ba75406455 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -27,9 +28,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
@@ -578,12 +576,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
create_mapping(io_desc + i);
}
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+ vmalloc_reserve = memparse(*arg, arg);
+
+ if (vmalloc_reserve < SZ_16M) {
+ vmalloc_reserve = SZ_16M;
+ printk(KERN_WARNING
+ "vmalloc area too small, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
+
static int __init check_membank_valid(struct membank *mb)
{
/*
- * Check whether this memory region has non-zero size.
+ * Check whether this memory region has non-zero size or
+ * invalid node number.
*/
- if (mb->size == 0)
+ if (mb->size == 0 || mb->node >= MAX_NUMNODES)
return 0;
/*
@@ -617,8 +638,7 @@ static int __init check_membank_valid(struct membank *mb)
static void __init sanity_check_meminfo(struct meminfo *mi)
{
- int i;
- int j;
+ int i, j;
for (i = 0, j = 0; i < mi->nr_banks; i++) {
if (check_membank_valid(&mi->bank[i]))
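
The core of this patch is the new `vmalloc=` early parameter: early_vmalloc() parses the requested size, clamps it to a 16MB minimum, and VMALLOC_MIN is then derived by counting back from VMALLOC_END. Below is a minimal standalone sketch (plain userspace C, not kernel code) of that behaviour. The simplified parse_size() helper standing in for the kernel's memparse(), and the example VMALLOC_END value, are assumptions made only for illustration.

/* Standalone illustration of the vmalloc= clamping; not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

#define SZ_16M		(16UL << 20)
#define SZ_128M		(128UL << 20)
#define VMALLOC_END	0xff000000UL	/* example value; platform dependent */

/* Simplified stand-in for the kernel's memparse(): number plus K/M/G suffix. */
static unsigned long parse_size(const char *arg)
{
	char *end;
	unsigned long val = strtoul(arg, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 30; break;
	case 'M': case 'm': val <<= 20; break;
	case 'K': case 'k': val <<= 10; break;
	default: break;
	}
	return val;
}

int main(int argc, char **argv)
{
	unsigned long vmalloc_reserve = SZ_128M;	/* default, as in the patch */

	if (argc > 1)
		vmalloc_reserve = parse_size(argv[1]);	/* e.g. "64M" */

	/* Same clamp as early_vmalloc(): never allow less than 16MB of vmalloc space. */
	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printf("vmalloc area too small, limiting to %luMB\n",
		       vmalloc_reserve >> 20);
	}

	/* The vmalloc area would start VMALLOC_MIN = VMALLOC_END - vmalloc_reserve. */
	printf("vmalloc reserve: %luMB, vmalloc area: 0x%08lx - 0x%08lx\n",
	       vmalloc_reserve >> 20,
	       VMALLOC_END - vmalloc_reserve, VMALLOC_END);
	return 0;
}

Running the sketch with an argument such as "8M" shows the clamp taking effect, mirroring the KERN_WARNING printk in early_vmalloc() above.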