From cedabed49b39b4319bccc059a63344b6232b619c Mon Sep 17 00:00:00 2001
From: OGAWA Hirofumi
Date: Wed, 13 Jan 2010 21:14:09 +0900
Subject: vfs: Fix vmtruncate() regression

If __block_prepare_write() fails in block_write_begin(), the allocated
blocks can be outside of ->i_size. But the new truncate_pagecache() in
vmtruncate() does nothing if new < old, so the above usage no longer
works. This patch fixes it by removing the "new < old" check.

More cleanup/changes are needed, but since the -rc cycle and the
truncate rework are in progress, this is the minimal fix.

Acked-by: Nick Piggin
Signed-off-by: OGAWA Hirofumi
Signed-off-by: Linus Torvalds
---
 mm/truncate.c | 30 ++++++++++++++----------------
 1 file changed, 14 insertions(+), 16 deletions(-)

(limited to 'mm')

diff --git a/mm/truncate.c b/mm/truncate.c
index 342deee2268..e87e3724482 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -522,22 +522,20 @@ EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
  */
 void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
 {
-	if (new < old) {
-		struct address_space *mapping = inode->i_mapping;
-
-		/*
-		 * unmap_mapping_range is called twice, first simply for
-		 * efficiency so that truncate_inode_pages does fewer
-		 * single-page unmaps. However after this first call, and
-		 * before truncate_inode_pages finishes, it is possible for
-		 * private pages to be COWed, which remain after
-		 * truncate_inode_pages finishes, hence the second
-		 * unmap_mapping_range call must be made for correctness.
-		 */
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-		truncate_inode_pages(mapping, new);
-		unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
-	}
+	struct address_space *mapping = inode->i_mapping;
+
+	/*
+	 * unmap_mapping_range is called twice, first simply for
+	 * efficiency so that truncate_inode_pages does fewer
+	 * single-page unmaps. However after this first call, and
+	 * before truncate_inode_pages finishes, it is possible for
+	 * private pages to be COWed, which remain after
+	 * truncate_inode_pages finishes, hence the second
+	 * unmap_mapping_range call must be made for correctness.
+	 */
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
+	truncate_inode_pages(mapping, new);
+	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
 }
 EXPORT_SYMBOL(truncate_pagecache);
--
cgit v1.2.3


From d2dbe08ddceb4ba2b274abb84326d7e69d454e5c Mon Sep 17 00:00:00 2001
From: Kazuhisa Ichikawa
Date: Fri, 15 Jan 2010 17:01:20 -0800
Subject: mm/page_alloc: fix the range check for backward merging

The current check for 'backward merging' within add_active_range() is
not correct: start_pfn must be compared against
early_node_map[i].start_pfn (and NOT against .end_pfn) to find out
whether the new region is backward-mergeable with the existing range.
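To illustrate the predicate, here is a minimal user-space sketch (not
part of the patch; the struct and function names are hypothetical
stand-ins for early_node_map[] and the in-kernel check):

    #include <assert.h>

    struct range { unsigned long start_pfn, end_pfn; }; /* [start, end) */

    /* A new region [start_pfn, end_pfn) is backward-mergeable only if it
     * begins strictly before the existing range's start while still
     * reaching it. Comparing start_pfn against .end_pfn instead, as the
     * old code did, also matches regions nested inside the existing
     * range, which must not be treated as a backward merge. */
    static int backward_mergeable(const struct range *r,
                                  unsigned long start_pfn,
                                  unsigned long end_pfn)
    {
            return start_pfn < r->start_pfn && end_pfn >= r->start_pfn;
    }

    int main(void)
    {
            struct range r = { .start_pfn = 100, .end_pfn = 200 };

            assert(backward_mergeable(&r, 50, 150));   /* extends range backward */
            assert(!backward_mergeable(&r, 120, 180)); /* nested: not a backward merge */
            return 0;
    }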
Signed-off-by: Kazuhisa Ichikawa
Acked-by: David Rientjes
Cc: KOSAKI Motohiro
Cc: Mel Gorman
Cc: Christoph Lameter
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb5..6ea4966a633 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3998,7 +3998,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 	}
 
 	/* Merge backward if suitable */
-	if (start_pfn < early_node_map[i].end_pfn &&
+	if (start_pfn < early_node_map[i].start_pfn &&
 	    end_pfn >= early_node_map[i].start_pfn) {
 		early_node_map[i].start_pfn = start_pfn;
 		return;
--
cgit v1.2.3


From de3fab39348dff18c69a0cd04efee9c276a02f51 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Fri, 15 Jan 2010 17:01:25 -0800
Subject: vmscan: kswapd: don't retry balance_pgdat() if all zones are unreclaimable

Commit f50de2d3 (vmscan: have kswapd sleep for a short interval and
double check it should be asleep) can cause kswapd to enter an infinite
loop if running on a single-CPU system. If all zones are unreclaimable,
sleeping_prematurely() returns 1 and kswapd calls balance_pgdat() again,
but that is pointless: balance_pgdat() can do nothing about an
unreclaimable zone!

Signed-off-by: KOSAKI Motohiro
Cc: Mel Gorman
Reported-by: Will Newton
Reviewed-by: Minchan Kim
Reviewed-by: Rik van Riel
Tested-by: Will Newton
Reviewed-by: Wu Fengguang
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'mm')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 885207a6b6b..c26986c85ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1922,6 +1922,9 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
 		if (!populated_zone(zone))
 			continue;
 
+		if (zone_is_all_unreclaimable(zone))
+			continue;
+
 		if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
 								0, 0))
 			return 1;
--
cgit v1.2.3


From fce66477578d081f19aef5ea218664ff7758c33a Mon Sep 17 00:00:00 2001
From: Daisuke Nishimura
Date: Fri, 15 Jan 2010 17:01:30 -0800
Subject: memcg: ensure list is empty at rmdir

Current mem_cgroup_force_empty() only ensures mem->res.usage == 0 on
success. But this doesn't guarantee the memcg's LRU is really empty,
because there are some cases in which !PageCgroupUsed pages exist on the
memcg's LRU.

For example:
- Pages can be uncharged by their owner process while they are on the LRU.
- A race between mem_cgroup_add_lru_list() and
  __mem_cgroup_uncharge_common().

So there can be a case in which the usage is zero but some of the LRUs
are not empty. OTOH, mem_cgroup_del_lru_list(), which can be called
asynchronously with rmdir, accesses the mem_cgroup, and this access can
cause a problem if it races with rmdir because the mem_cgroup might have
been freed by rmdir.

Actually, I saw a bug which seems to be caused by this race.
[1530745.949906] BUG: unable to handle kernel NULL pointer dereference at 0000000000000230
[1530745.950651] IP: [] mem_cgroup_del_lru_list+0x30/0x80
[1530745.950651] PGD 3863de067 PUD 3862c7067 PMD 0
[1530745.950651] Oops: 0002 [#1] SMP
[1530745.950651] last sysfs file: /sys/devices/system/cpu/cpu7/cache/index1/shared_cpu_map
[1530745.950651] CPU 3
[1530745.950651] Modules linked in: configs ipt_REJECT xt_tcpudp iptable_filter ip_tables x_tables bridge stp nfsd nfs_acl auth_rpcgss exportfs autofs4 hidp rfcomm l2cap crc16 bluetooth lockd sunrpc ib_iser rdma_cm ib_cm iw_cm ib_sa ib_mad ib_core ib_addr iscsi_tcp bnx2i cnic uio ipv6 cxgb3i cxgb3 mdio libiscsi_tcp libiscsi scsi_transport_iscsi dm_mirror dm_multipath scsi_dh video output sbs sbshc battery ac lp kvm_intel kvm sg ide_cd_mod cdrom serio_raw tpm_tis tpm tpm_bios acpi_memhotplug button parport_pc parport rtc_cmos rtc_core rtc_lib e1000 i2c_i801 i2c_core pcspkr dm_region_hash dm_log dm_mod ata_piix libata shpchp megaraid_mbox sd_mod scsi_mod megaraid_mm ext3 jbd uhci_hcd ohci_hcd ehci_hcd [last unloaded: freq_table]
[1530745.950651] Pid: 19653, comm: shmem_test_02 Tainted: G M 2.6.32-mm1-00701-g2b04386 #3 Express5800/140Rd-4 [N8100-1065]
[1530745.950651] RIP: 0010:[] [] mem_cgroup_del_lru_list+0x30/0x80
[1530745.950651] RSP: 0018:ffff8803863ddcb8 EFLAGS: 00010002
[1530745.950651] RAX: 00000000000001e0 RBX: ffff8803abc02238 RCX: 00000000000001e0
[1530745.950651] RDX: 0000000000000000 RSI: ffff88038611a000 RDI: ffff8803abc02238
[1530745.950651] RBP: ffff8803863ddcc8 R08: 0000000000000002 R09: ffff8803a04c8643
[1530745.950651] R10: 0000000000000000 R11: ffffffff810c7333 R12: 0000000000000000
[1530745.950651] R13: ffff880000017f00 R14: 0000000000000092 R15: ffff8800179d0310
[1530745.950651] FS: 0000000000000000(0000) GS:ffff880017800000(0000) knlGS:0000000000000000
[1530745.950651] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[1530745.950651] CR2: 0000000000000230 CR3: 0000000379d87000 CR4: 00000000000006e0
[1530745.950651] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[1530745.950651] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[1530745.950651] Process shmem_test_02 (pid: 19653, threadinfo ffff8803863dc000, task ffff88038612a8a0)
[1530745.950651] Stack:
[1530745.950651]  ffffea00040c2fe8 0000000000000000 ffff8803863ddd98 ffffffff810c739a
[1530745.950651] <0> 00000000863ddd18 000000000000000c 0000000000000000 0000000000000000
[1530745.950651] <0> 0000000000000002 0000000000000000 ffff8803863ddd68 0000000000000046
[1530745.950651] Call Trace:
[1530745.950651]  [] release_pages+0x142/0x1e7
[1530745.950651]  [] ? pagevec_move_tail+0x6e/0x112
[1530745.950651]  [] pagevec_move_tail+0xfd/0x112
[1530745.950651]  [] lru_add_drain+0x76/0x94
[1530745.950651]  [] exit_mmap+0x6e/0x145
[1530745.950651]  [] mmput+0x5e/0xcf
[1530745.950651]  [] exit_mm+0x11c/0x129
[1530745.950651]  [] ? audit_free+0x196/0x1c9
[1530745.950651]  [] do_exit+0x1f5/0x6b7
[1530745.950651]  [] ? up_read+0x2b/0x2f
[1530745.950651]  [] ? lockdep_sys_exit_thunk+0x35/0x67
[1530745.950651]  [] do_group_exit+0x83/0xb0
[1530745.950651]  [] sys_exit_group+0x17/0x1b
[1530745.950651]  [] system_call_fastpath+0x16/0x1b
[1530745.950651] Code: 54 53 0f 1f 44 00 00 83 3d cc 29 7c 00 00 41 89 f4 75 63 eb 4e 48 83 7b 08 00 75 04 0f 0b eb fe 48 89 df e8 18 f3 ff ff 44 89 e2 <48> ff 4c d0 50 48 8b 05 2b 2d 7c 00 48 39 43 08 74 39 48 8b 4b
[1530745.950651] RIP  [] mem_cgroup_del_lru_list+0x30/0x80
[1530745.950651]  RSP
[1530745.950651] CR2: 0000000000000230
[1530745.950651] ---[ end trace c3419c1bb8acc34f ]---
[1530745.950651] Fixing recursive fault but reboot is needed!

The problem here is that pages on the LRU may contain a pointer to a
stale memcg. To make res->usage 0, all pages on the memcg must be
uncharged or moved to another (parent) memcg. A moved page_cgroup has
already been removed from the original LRU, but an uncharged page_cgroup
still contains a pointer to the memcg, just without the PCG_USED bit.
(This asynchronous LRU work is done to improve performance.) If the
PCG_USED bit is not set, a page_cgroup is never added to the memcg's
LRU, so pages not on an LRU never access the stale pointer. What we have
to take care of, then, are the page_cgroup entries _on_ the LRU list.

This patch fixes this problem by making mem_cgroup_force_empty() visit
all LRUs before exiting its loop, guaranteeing there are no pages left
on its LRU.

Signed-off-by: Daisuke Nishimura
Acked-by: KAMEZAWA Hiroyuki
Cc: Balbir Singh
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'mm')

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 488b644e0e8..954032b80be 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2586,7 +2586,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
 	if (free_all)
 		goto try_to_free;
 move_account:
-	while (mem->res.usage > 0) {
+	do {
 		ret = -EBUSY;
 		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
 			goto out;
@@ -2614,8 +2614,8 @@ move_account:
 		if (ret == -ENOMEM)
 			goto try_to_free;
 		cond_resched();
-	}
-	ret = 0;
+	/* "ret" should also be checked to ensure all lists are empty. */
+	} while (mem->res.usage > 0 || ret);
 out:
 	css_put(&mem->css);
 	return ret;
@@ -2648,10 +2648,7 @@ try_to_free:
 	}
 	lru_add_drain();
 	/* try move_account...there may be some *locked* pages. */
-	if (mem->res.usage)
-		goto move_account;
-	ret = 0;
-	goto out;
+	goto move_account;
 }
 
 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
--
cgit v1.2.3


From 1e2ae599d37e60958c03ca5e46b1f657619a30cd Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 15 Jan 2010 17:01:33 -0800
Subject: nommu: struct vm_region's vm_usage count need not be atomic

The vm_usage count field in struct vm_region does not need to be atomic
as it's only ever modified whilst nommu_region_sem is write locked.
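The pattern, as a hedged sketch (the names below are hypothetical, not
the patch itself): a count that is only ever read or written while a
semaphore is held for writing is already fully serialized, so a plain
int cannot suffer a lost update and atomic_t buys nothing:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(demo_region_sem);	/* stands in for nommu_region_sem */

    struct demo_region {
            int usage;	/* protected by demo_region_sem held for write */
    };

    static void demo_get(struct demo_region *r)
    {
            down_write(&demo_region_sem);
            r->usage++;			/* plain increment is race-free here */
            up_write(&demo_region_sem);
    }

    /* returns true when the last reference went away */
    static int demo_put(struct demo_region *r)
    {
            int dead;

            down_write(&demo_region_sem);
            dead = (--r->usage == 0);
            up_write(&demo_region_sem);
            return dead;
    }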
Signed-off-by: David Howells
Acked-by: Al Viro
Cc: Greg Ungerer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nommu.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index 17773862619..5e39294f8ea 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,11 +552,11 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
 	__releases(nommu_region_sem)
 {
-	kenter("%p{%d}", region, atomic_read(&region->vm_usage));
+	kenter("%p{%d}", region, region->vm_usage);
 
 	BUG_ON(!nommu_region_tree.rb_node);
 
-	if (atomic_dec_and_test(&region->vm_usage)) {
+	if (--region->vm_usage == 0) {
 		if (region->vm_top > region->vm_start)
 			delete_nommu_region(region);
 		up_write(&nommu_region_sem);
@@ -1205,7 +1205,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 	if (!vma)
 		goto error_getting_vma;
 
-	atomic_set(&region->vm_usage, 1);
+	region->vm_usage = 1;
 	region->vm_flags = vm_flags;
 	region->vm_pgoff = pgoff;
 
@@ -1272,7 +1272,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 			}
 
 			/* we've found a region we can share */
-			atomic_inc(&pregion->vm_usage);
+			pregion->vm_usage++;
 			vma->vm_region = pregion;
 			start = pregion->vm_start;
 			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
@@ -1289,7 +1289,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 					vma->vm_region = NULL;
 					vma->vm_start = 0;
 					vma->vm_end = 0;
-					atomic_dec(&pregion->vm_usage);
+					pregion->vm_usage--;
 					pregion = NULL;
 					goto error_just_free;
 				}
@@ -1444,7 +1444,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* we're only permitted to split anonymous regions that have a single
 	 * owner */
 	if (vma->vm_file ||
-	    atomic_read(&vma->vm_region->vm_usage) != 1)
+	    vma->vm_region->vm_usage != 1)
 		return -ENOMEM;
 
 	if (mm->map_count >= sysctl_max_map_count)
@@ -1518,7 +1518,7 @@ static int shrink_vma(struct mm_struct *mm,
 
 	/* cut the backing region down to size */
 	region = vma->vm_region;
-	BUG_ON(atomic_read(&region->vm_usage) != 1);
+	BUG_ON(region->vm_usage != 1);
 
 	down_write(&nommu_region_sem);
 	delete_nommu_region(region);
--
cgit v1.2.3


From 779c10232ceb11c1b259232c4845cfb2850287b7 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 15 Jan 2010 17:01:34 -0800
Subject: nommu: remove a superfluous check of vm_region::vm_usage

In split_vma(), there's no need to check if the VMA being split has a
region that's in use by more than one VMA because:

 (1) The preceding test prohibits splitting of non-anonymous VMAs and
     regions (eg: file or chardev backed VMAs).

 (2) Anonymous regions can't be mapped multiple times because there's no
     handle by which to refer to the already existing region.

 (3) If a VMA has previously been split, then the region backing it has
     also been split into two regions, each of usage 1.
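A toy user-space restatement of that reasoning (hypothetical miniature
types, not the kernel code): once file-backed VMAs are rejected, points
(1)-(3) turn the usage test into an invariant rather than a check:

    #include <assert.h>
    #include <stddef.h>

    struct demo_region { int vm_usage; };
    struct demo_vma { const char *vm_file; struct demo_region *vm_region; };

    static int may_split(const struct demo_vma *vma)
    {
            if (vma->vm_file)
                    return 0;	/* non-anonymous: region may be shared */
            /* invariant guaranteed by (1)-(3) above: */
            assert(vma->vm_region->vm_usage == 1);
            return 1;
    }

    int main(void)
    {
            struct demo_region r = { .vm_usage = 1 };
            struct demo_vma anon = { .vm_file = NULL, .vm_region = &r };

            assert(may_split(&anon));
            return 0;
    }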
Signed-off-by: David Howells
Acked-by: Al Viro
Cc: Greg Ungerer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nommu.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index 5e39294f8ea..d6dd656264a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1441,10 +1441,9 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	kenter("");
 
-	/* we're only permitted to split anonymous regions that have a single
-	 * owner */
-	if (vma->vm_file ||
-	    vma->vm_region->vm_usage != 1)
+	/* we're only permitted to split anonymous regions (these should have
+	 * only a single usage on the region) */
+	if (vma->vm_file)
 		return -ENOMEM;
 
 	if (mm->map_count >= sysctl_max_map_count)
--
cgit v1.2.3


From efc1a3b16930c41d64ffefde16b87d82f603a8a0 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 15 Jan 2010 17:01:35 -0800
Subject: nommu: don't need get_unmapped_area() for NOMMU

get_unmapped_area() is unnecessary for NOMMU as no-one calls it.

Signed-off-by: David Howells
Acked-by: Al Viro
Cc: Greg Ungerer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nommu.c | 21 ---------------------
 mm/util.c  |  2 +-
 2 files changed, 1 insertion(+), 22 deletions(-)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index d6dd656264a..32be0cf51ba 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1760,27 +1760,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/*
- * ask for an unmapped area at which to create a mapping on a file
- */
-unsigned long get_unmapped_area(struct file *file, unsigned long addr,
-				unsigned long len, unsigned long pgoff,
-				unsigned long flags)
-{
-	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
-				  unsigned long, unsigned long);
-
-	get_area = current->mm->get_unmapped_area;
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		get_area = file->f_op->get_unmapped_area;
-
-	if (!get_area)
-		return -ENOSYS;
-
-	return get_area(file, addr, len, pgoff, flags);
-}
-EXPORT_SYMBOL(get_unmapped_area);
-
 /*
  * Check that a process has enough memory to allocate a new virtual
  * mapping. 0 means there is enough memory for the allocation to
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f92..834db7be240 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -220,7 +220,7 @@ char *strndup_user(const char __user *s, long n)
 }
 EXPORT_SYMBOL(strndup_user);
 
-#ifndef HAVE_ARCH_PICK_MMAP_LAYOUT
+#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	mm->mmap_base = TASK_UNMAPPED_BASE;
--
cgit v1.2.3


From 7e6608724c640924aad1d556d17df33ebaa6124d Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 15 Jan 2010 17:01:39 -0800
Subject: nommu: fix shared mmap after truncate shrinkage problems

Fix a problem in NOMMU mmap with ramfs whereby a shared mmap can happen
over the end of a truncation. The problem is that
ramfs_nommu_check_mappings() checks the reduced file size against the
VMA tree, but not the vm_region tree.

The following sequence of events can cause the problem:

	fd = open("/tmp/x", O_RDWR|O_TRUNC|O_CREAT, 0600);
	ftruncate(fd, 32 * 1024);
	a = mmap(NULL, 32 * 1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	b = mmap(NULL, 16 * 1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	munmap(a, 32 * 1024);
	ftruncate(fd, 16 * 1024);
	c = mmap(NULL, 32 * 1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

Mapping 'a' creates a vm_region covering 32KB of the file.
Mapping 'b' sees that the vm_region from 'a' covers the region it wants
and so shares it, pinning it in memory.

Mapping 'a' then goes away and the file is truncated to the end of VMA
'b'. However, the region allocated by 'a' is still in effect, and has
_not_ been reduced.

Mapping 'c' is then created, and because there's a vm_region covering
the desired region, get_unmapped_area() is _not_ called to repeat the
check, and the mapping is granted, even though the pages from the latter
half of the mapping have been discarded.

However:

	d = mmap(NULL, 16 * 1024, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

Mapping 'd' should work, and should end up sharing the region allocated
by 'a'.

To deal with this, we shrink the vm_region struct during the truncation,
lest do_mmap_pgoff() take it as licence to share the full region
automatically without calling the get_unmapped_area() file op again.

Signed-off-by: David Howells
Acked-by: Al Viro
Cc: Greg Ungerer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/nommu.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index 32be0cf51ba..48a2ecfaf05 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1914,3 +1914,65 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	mmput(mm);
 	return len;
 }
+
+/**
+ * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
+ * @inode: The inode to check
+ * @size: The current filesize of the inode
+ * @newsize: The proposed filesize of the inode
+ *
+ * Check the shared mappings on an inode on behalf of a shrinking truncate to
+ * make sure that any outstanding VMAs aren't broken and then shrink the
+ * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
+ * automatically grant mappings that are too large.
+ */
+int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+				size_t newsize)
+{
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+	struct vm_region *region;
+	pgoff_t low, high;
+	size_t r_size, r_top;
+
+	low = newsize >> PAGE_SHIFT;
+	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	down_write(&nommu_region_sem);
+
+	/* search for VMAs that fall within the dead zone */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      low, high) {
+		/* found one - only interested if it's shared out of the page
+		 * cache */
+		if (vma->vm_flags & VM_SHARED) {
+			up_write(&nommu_region_sem);
+			return -ETXTBSY; /* not quite true, but near enough */
+		}
+	}
+
+	/* reduce any regions that overlap the dead zone - if in existence,
+	 * these will be pointed to by VMAs that don't overlap the dead zone
+	 *
+	 * we don't check for any regions that start beyond the EOF as there
+	 * shouldn't be any
+	 */
+	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+			      0, ULONG_MAX) {
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		region = vma->vm_region;
+		r_size = region->vm_top - region->vm_start;
+		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
+
+		if (r_top > newsize) {
+			region->vm_top -= r_top - newsize;
+			if (region->vm_end > region->vm_top)
+				region->vm_end = region->vm_top;
+		}
+	}
+
+	up_write(&nommu_region_sem);
+	return 0;
+}
--
cgit v1.2.3


From 6ccf80eb15ccaca4d3f1ab5162b9ded5eecd9971 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Fri, 15 Jan 2010 17:01:18 -0800
Subject: page allocator: update NR_FREE_PAGES only when necessary

Commit f2260e6b (page allocator: update NR_FREE_PAGES only as necessary)
introduced a minor regression: if __rmqueue() failed, the NR_FREE_PAGES
stat went wrong. This patch fixes it by updating the counter only after
__rmqueue() is known to have succeeded.

Signed-off-by: KOSAKI Motohiro
Cc: Mel Gorman
Reviewed-by: Minchan Kim
Reported-by: Huang Shijie
Reviewed-by: Christoph Lameter
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6ea4966a633..d2a8889b4c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1222,10 +1222,10 @@ again:
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
-		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
--
cgit v1.2.3
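The general shape of the fix, as a small user-space sketch (hypothetical
names, not the kernel code): the statistic must be updated only on the
path where the allocation is known to have succeeded:

    #include <assert.h>
    #include <stddef.h>

    /* Toy model of the ordering bug: decrementing a free-page counter
     * before checking whether the allocation produced a page lets a
     * failed rmqueue() skew the statistic. The fix is to touch the
     * counter only on the success path. */
    static long nr_free = 4;

    static void *rmqueue(int order)
    {
            if (nr_free < (1L << order))
                    return NULL;	/* allocation failure */
            return &nr_free;		/* stand-in for a struct page */
    }

    static void *alloc(int order)
    {
            void *page = rmqueue(order);

            if (!page)
                    return NULL;	/* counter left untouched on failure */
            nr_free -= 1L << order;	/* account only once we have a page */
            return page;
    }

    int main(void)
    {
            assert(alloc(2) != NULL && nr_free == 0);
            assert(alloc(2) == NULL && nr_free == 0); /* stat stays correct */
            return 0;
    }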