Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         90
-rw-r--r--  mm/mmap.c             1
-rw-r--r--  mm/nommu.c            8
-rw-r--r--  mm/page-writeback.c   7
-rw-r--r--  mm/page_alloc.c      28
-rw-r--r--  mm/rmap.c             1
-rw-r--r--  mm/slab.c            52
-rw-r--r--  mm/sparse.c          12
-rw-r--r--  mm/vmalloc.c          7
-rw-r--r--  mm/vmscan.c           8
10 files changed, 115 insertions, 99 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5631d6b2a62..9cbf4fea4a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1110,6 +1110,45 @@ success:
return size;
}
+/*
+ * Performs necessary checks before doing a read or write
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @count: number of bytes to access
+ * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
+ *
+ * Adjusts the number of segments and the number of bytes to access (nr_segs
+ * should be properly initialized first). Returns the appropriate error code
+ * that the caller should return, or zero if the access should be allowed.
+ */
+int generic_segment_checks(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count, int access_flags)
+{
+ unsigned long seg;
+ size_t cnt = 0;
+ for (seg = 0; seg < *nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
+
+ /*
+ * If any segment has a negative length, or the cumulative
+ * length ever wraps negative then return -EINVAL.
+ */
+ cnt += iv->iov_len;
+ if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+ return -EINVAL;
+ if (access_ok(access_flags, iv->iov_base, iv->iov_len))
+ continue;
+ if (seg == 0)
+ return -EFAULT;
+ *nr_segs = seg;
+ cnt -= iv->iov_len; /* This segment is no good */
+ break;
+ }
+ *count = cnt;
+ return 0;
+}
+EXPORT_SYMBOL(generic_segment_checks);
+
/**
* generic_file_aio_read - generic filesystem read routine
* @iocb: kernel I/O control block
@@ -1131,24 +1170,9 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
loff_t *ppos = &iocb->ki_pos;
count = 0;
- for (seg = 0; seg < nr_segs; seg++) {
- const struct iovec *iv = &iov[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- count += iv->iov_len;
- if (unlikely((ssize_t)(count|iv->iov_len) < 0))
- return -EINVAL;
- if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
- continue;
- if (seg == 0)
- return -EFAULT;
- nr_segs = seg;
- count -= iv->iov_len; /* This segment is no good */
- break;
- }
+ retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+ if (retval)
+ return retval;
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
@@ -2218,30 +2242,14 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
size_t ocount; /* original count */
size_t count; /* after file limit checks */
struct inode *inode = mapping->host;
- unsigned long seg;
loff_t pos;
ssize_t written;
ssize_t err;
ocount = 0;
- for (seg = 0; seg < nr_segs; seg++) {
- const struct iovec *iv = &iov[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- ocount += iv->iov_len;
- if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
- return -EINVAL;
- if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
- continue;
- if (seg == 0)
- return -EFAULT;
- nr_segs = seg;
- ocount -= iv->iov_len; /* This segment is no good */
- break;
- }
+ err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
+ if (err)
+ return err;
count = ocount;
pos = *ppos;
@@ -2301,10 +2309,10 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
* semantics.
*/
endbyte = pos + written_buffered - written - 1;
- err = do_sync_file_range(file, pos, endbyte,
- SYNC_FILE_RANGE_WAIT_BEFORE|
- SYNC_FILE_RANGE_WRITE|
- SYNC_FILE_RANGE_WAIT_AFTER);
+ err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
+ SYNC_FILE_RANGE_WAIT_BEFORE|
+ SYNC_FILE_RANGE_WRITE|
+ SYNC_FILE_RANGE_WAIT_AFTER);
if (err == 0) {
written = written_buffered;
invalidate_mapping_pages(mapping,
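
The new generic_segment_checks() helper factors out the iovec validation that generic_file_aio_read() and __generic_file_aio_write_nolock() had been duplicating. A caller outside this file would use it roughly as follows (a minimal sketch; fs_aio_write() and its body are hypothetical):

    ssize_t fs_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         unsigned long nr_segs, loff_t pos)
    {
            size_t count;
            int err;

            /* A write reads from user memory, so check VERIFY_READ access.
             * nr_segs may be trimmed, and count is set to the checked total. */
            err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
            if (err)
                    return err;

            /* ... submit 'count' bytes across the first 'nr_segs' segments ... */
            return count;
    }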
diff --git a/mm/mmap.c b/mm/mmap.c
index 52646d61ff6..cc1f543eb1b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1366,7 +1366,6 @@ unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- unsigned long ret;
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
diff --git a/mm/nommu.c b/mm/nommu.c
index 1f60194d9b9..2b16b00a5b1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -262,6 +262,14 @@ void vunmap(void *addr)
}
/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
+
+/*
* sys_brk() for the most part doesn't need the global kernel
* lock, except when an application is doing something nasty
* like trying to un-brk an area that has already been mapped
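
Because the stub is declared weak, an architecture that provides a real vmalloc_sync_all() overrides it at link time with no #ifdef needed. The pattern in isolation (hypothetical function name, two separate source files):

    /* Generic code: weak default, used only if nothing stronger exists. */
    void __attribute__((weak)) sync_all(void)
    {
    }

    /* Arch code: an ordinary (strong) definition with the same name is
     * chosen by the linker in preference to the weak stub above. */
    void sync_all(void)
    {
            /* arch-specific synchronization would go here */
    }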
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 029dfad5a23..63cd88840eb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -683,12 +683,7 @@ retry:
}
ret = (*writepage)(page, wbc);
- if (ret) {
- if (ret == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
+ mapping_set_error(mapping, ret);
if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
unlock_page(page);
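
mapping_set_error() centralizes the error bookkeeping that was open-coded both here and in mm/vmscan.c (see the matching hunk below). Its behavior is roughly the following (a sketch of the pagemap.h helper, not quoted verbatim):

    static inline void mapping_set_error(struct address_space *mapping, int error)
    {
            if (unlikely(error)) {
                    /* Keep out-of-space distinct from generic I/O failure so
                     * later fsync() callers can report the right errno. */
                    if (error == -ENOSPC)
                            set_bit(AS_ENOSPC, &mapping->flags);
                    else
                            set_bit(AS_EIO, &mapping->flags);
            }
    }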
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59164313167..6fd0b7455b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -103,7 +103,7 @@ int min_free_kbytes = 1024;
unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
@@ -126,10 +126,10 @@ static unsigned long __initdata dma_reserve;
#endif
#endif
- struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
- int __initdata nr_nodemap_entries;
- unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
- unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+ struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+ int __meminitdata nr_nodemap_entries;
+ unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+ unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
@@ -2179,7 +2179,7 @@ void __init setup_per_cpu_pageset(void)
#endif
-static __meminit
+static __meminit noinline
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
@@ -2267,7 +2267,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
* Basic iterator support. Return the first range of PFNs for a node
* Note: nid == MAX_NUMNODES returns first region regardless of node
*/
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
{
int i;
@@ -2282,7 +2282,7 @@ static int __init first_active_region_index_in_nid(int nid)
* Basic iterator support. Return the next active range of PFNs for a node
 * Note: nid == MAX_NUMNODES returns next region regardless of node
*/
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
{
for (index = index + 1; index < nr_nodemap_entries; index++)
if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2435,7 +2435,7 @@ static void __init account_node_boundary(unsigned int nid,
* with no available memory, a warning is printed and the start and end
* PFNs will be 0.
*/
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{
int i;
@@ -2460,7 +2460,7 @@ void __init get_pfn_range_for_nid(unsigned int nid,
* Return the number of pages a zone spans in a node, including holes
* present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
*/
-unsigned long __init zone_spanned_pages_in_node(int nid,
+unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
@@ -2488,7 +2488,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -2548,7 +2548,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
}
/* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+unsigned long __meminit zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long *ignored)
{
@@ -2584,7 +2584,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
#endif
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long realtotalpages, totalpages = 0;
@@ -2692,7 +2692,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
}
}
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
{
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
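
The __init/__initdata to __meminit/__meminitdata conversions keep these functions and arrays resident after boot when memory hotplug is configured, since hot-added nodes replay this initialization at runtime. Conceptually the annotations reduce to something like this (a simplified sketch of the include/linux/init.h logic):

    #ifdef CONFIG_MEMORY_HOTPLUG
    /* Memory can be added after boot: keep the code and data around. */
    #define __meminit
    #define __meminitdata
    #else
    /* No hotplug: ordinary init text/data, discarded once boot finishes. */
    #define __meminit       __init
    #define __meminitdata   __initdata
    #endif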
diff --git a/mm/rmap.c b/mm/rmap.c
index 75a32be64a2..304f51985c7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -505,6 +505,7 @@ int page_mkclean(struct page *page)
return ret;
}
+EXPORT_SYMBOL_GPL(page_mkclean);
/**
* page_set_anon_rmap - setup new anonymous rmap
diff --git a/mm/slab.c b/mm/slab.c
index 5920a412b37..acda7e2d66e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -148,10 +148,11 @@
* Usually, the kmalloc caches are cache_line_size() aligned, except when
* DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
- * Note that this flag disables some debug features.
+ * alignment larger than the alignment of a 64-bit integer.
+ * ARCH_KMALLOC_MINALIGN allows that.
+ * Note that increasing this value may disable some debug features.
*/
-#define ARCH_KMALLOC_MINALIGN 0
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef ARCH_SLAB_MINALIGN
@@ -536,19 +537,22 @@ static int obj_size(struct kmem_cache *cachep)
return cachep->obj_size;
}
-static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
+ return (unsigned long long*) (objp + obj_offset(cachep) -
+ sizeof(unsigned long long));
}
-static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
+static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long *)(objp + cachep->buffer_size -
- 2 * BYTES_PER_WORD);
- return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
+ return (unsigned long long *)(objp + cachep->buffer_size -
+ sizeof(unsigned long long) -
+ BYTES_PER_WORD);
+ return (unsigned long long *) (objp + cachep->buffer_size -
+ sizeof(unsigned long long));
}
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
@@ -561,8 +565,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#define obj_offset(x) 0
#define obj_size(cachep) (cachep->buffer_size)
-#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
-#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
+#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
+#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
#endif
@@ -1776,7 +1780,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
char *realobj;
if (cachep->flags & SLAB_RED_ZONE) {
- printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
+ printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
@@ -2239,7 +2243,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* is greater than BYTES_PER_WORD.
*/
if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
- ralign = BYTES_PER_WORD;
+ ralign = __alignof__(unsigned long long);
/* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) {
@@ -2250,7 +2254,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
ralign = align;
}
/* disable debug if necessary */
- if (ralign > BYTES_PER_WORD)
+ if (ralign > __alignof__(unsigned long long))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
/*
* 4) Store it.
@@ -2271,8 +2275,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
- cachep->obj_offset += BYTES_PER_WORD;
- size += 2 * BYTES_PER_WORD;
+ cachep->obj_offset += sizeof(unsigned long long);
+ size += 2 * sizeof(unsigned long long);
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
@@ -2833,7 +2837,7 @@ static void kfree_debugcheck(const void *objp)
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
- unsigned long redzone1, redzone2;
+ unsigned long long redzone1, redzone2;
redzone1 = *dbg_redzone1(cache, obj);
redzone2 = *dbg_redzone2(cache, obj);
@@ -2849,7 +2853,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
else
slab_error(cache, "memory outside object was overwritten");
- printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+ printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
obj, redzone1, redzone2);
}
@@ -3065,7 +3069,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
- "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
+ "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
@@ -4428,16 +4432,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
- char *modname;
- const char *name;
unsigned long offset, size;
- char namebuf[KSYM_NAME_LEN+1];
-
- name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+ char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
- if (name) {
+ if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
- if (modname)
+ if (modname[0])
seq_printf(m, " [%s]", modname);
return;
}
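
Widening the red zones from unsigned long to unsigned long long keeps redzone checking usable now that ARCH_KMALLOC_MINALIGN defaults to __alignof__(unsigned long long); on 32-bit architectures the two types differ in size, so BYTES_PER_WORD can no longer describe the layout. The placement rule from the new dbg_redzone2() can be modeled in isolation (a hypothetical user-space sketch, not the kernel code; BYTES_PER_WORD is sizeof(void *) in mm/slab.c):

    #include <stdio.h>

    /* Second redzone: at the end of the buffer, before the optional
     * caller-tracking word when SLAB_STORE_USER-style debugging is on. */
    static size_t redzone2_offset(size_t buffer_size, int store_user)
    {
            if (store_user)
                    return buffer_size - sizeof(unsigned long long)
                                       - sizeof(void *);
            return buffer_size - sizeof(unsigned long long);
    }

    int main(void)
    {
            printf("without user word: %zu\n", redzone2_offset(64, 0));
            printf("with user word:    %zu\n", redzone2_offset(64, 1));
            return 0;
    }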
diff --git a/mm/sparse.c b/mm/sparse.c
index 893e5621c24..6f3fff907bc 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(page_to_nid);
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline *sparse_index_alloc(int nid)
{
struct mem_section *section = NULL;
unsigned long array_size = SECTIONS_PER_ROOT *
@@ -61,7 +61,7 @@ static struct mem_section *sparse_index_alloc(int nid)
return section;
}
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
static DEFINE_SPINLOCK(index_init_lock);
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
@@ -138,7 +138,7 @@ static inline int sparse_early_nid(struct mem_section *section)
}
/* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
{
unsigned long pfn;
@@ -197,7 +197,7 @@ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pn
return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
-static int sparse_init_one_section(struct mem_section *ms,
+static int __meminit sparse_init_one_section(struct mem_section *ms,
unsigned long pnum, struct page *mem_map)
{
if (!valid_section(ms))
@@ -209,7 +209,7 @@ static int sparse_init_one_section(struct mem_section *ms,
return 1;
}
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
struct mem_section *ms = __nr_to_section(pnum);
@@ -288,6 +288,7 @@ void __init sparse_init(void)
}
}
+#ifdef CONFIG_MEMORY_HOTPLUG
/*
* returns the number of sections whose mem_maps were properly
* set. If this is <=0, then that means that the passed-in
@@ -327,3 +328,4 @@ out:
__kfree_section_memmap(memmap, nr_pages);
return ret;
}
+#endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index cb5aabda704..faa2a521dea 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -755,3 +755,10 @@ out_einval_locked:
}
EXPORT_SYMBOL(remap_vmalloc_range);
+/*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
+ */
+void __attribute__((weak)) vmalloc_sync_all(void)
+{
+}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56651a10c36..1c8e75a1cfc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -284,12 +284,8 @@ static void handle_write_error(struct address_space *mapping,
struct page *page, int error)
{
lock_page(page);
- if (page_mapping(page) == mapping) {
- if (error == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
+ if (page_mapping(page) == mapping)
+ mapping_set_error(mapping, error);
unlock_page(page);
}