path: root/mm
author     Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:43:49 -0400
committer  Jeff Garzik <jgarzik@pobox.com>    2005-09-08 05:43:49 -0400
commit     1d6ae775d7a948c9575658eb41184fd2e506c0df (patch)
tree       8128a28e89d82f13bb8e3a2160382240c66e2816 /mm
parent     739cdbf1d8f0739b80035b80d69d871e33749b86 (diff)
parent     caf39e87cc1182f7dae84eefc43ca14d54c78ef9 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig        22
-rw-r--r--  mm/filemap.c      14
-rw-r--r--  mm/hugetlb.c       2
-rw-r--r--  mm/madvise.c       9
-rw-r--r--  mm/memory.c       18
-rw-r--r--  mm/mempolicy.c    12
-rw-r--r--  mm/mmap.c          9
-rw-r--r--  mm/mremap.c        4
-rw-r--r--  mm/oom_kill.c     62
-rw-r--r--  mm/page_alloc.c   57
-rw-r--r--  mm/readahead.c     1
-rw-r--r--  mm/rmap.c         29
-rw-r--r--  mm/shmem.c        93
-rw-r--r--  mm/slab.c         51
-rw-r--r--  mm/sparse.c       75
-rw-r--r--  mm/swap_state.c    6
-rw-r--r--  mm/swapfile.c    412
-rw-r--r--  mm/vmalloc.c       2
-rw-r--r--  mm/vmscan.c       20
19 files changed, 486 insertions, 412 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index cd379936cac..4e9937ac352 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -89,3 +89,25 @@ config NEED_MULTIPLE_NODES
config HAVE_MEMORY_PRESENT
def_bool y
depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
+
+#
+# SPARSEMEM_EXTREME (which is the default) does some bootmem
+# allocations when memory_present() is called. If this can not
+# be done on your architecture, select this option. However,
+# statically allocating the mem_section[] array can potentially
+# consume vast quantities of .bss, so be careful.
+#
+# This option will also potentially produce smaller runtime code
+# with gcc 3.4 and later.
+#
+config SPARSEMEM_STATIC
+ def_bool n
+
+#
+# Architecture platforms which require a two level mem_section in SPARSEMEM
+# must select this option. This is usually for architecture platforms with
+# an extremely sparse physical address space.
+#
+config SPARSEMEM_EXTREME
+ def_bool y
+ depends on SPARSEMEM && !SPARSEMEM_STATIC
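As a rough illustration of the .bss tradeoff described in the SPARSEMEM_STATIC help text above (not part of this patch), a flat mem_section[] pays for every possible section up front, while the EXTREME layout keeps only a table of root pointers and bootmem-allocates each root as memory_present() discovers it. A minimal userspace sketch with invented sizes (SECTIONS_PER_ROOT, NR_MEM_SECTIONS and the struct are all made up for the comparison):

/* Illustrative only: every constant below is invented, not the kernel's. */
#include <stdio.h>

#define SECTIONS_PER_ROOT 256                 /* assumed per-root fan-out */
#define NR_MEM_SECTIONS   (1UL << 19)         /* assumed worst-case section count */
#define NR_SECTION_ROOTS  (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)

struct mem_section { unsigned long section_mem_map; };

int main(void)
{
	/* SPARSEMEM_STATIC: one flat array, all of it sitting in .bss. */
	unsigned long flat  = NR_MEM_SECTIONS * sizeof(struct mem_section);
	/* SPARSEMEM_EXTREME: only the root pointers are static; the roots
	 * themselves are allocated at boot for memory that actually exists. */
	unsigned long roots = NR_SECTION_ROOTS * sizeof(struct mem_section *);

	printf("static mem_section[]: %lu bytes of .bss\n", flat);
	printf("extreme root table:   %lu bytes of .bss\n", roots);
	return 0;
}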
diff --git a/mm/filemap.c b/mm/filemap.c
index c11418dd94e..88611928e71 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -54,9 +54,8 @@
*
* ->i_mmap_lock (vmtruncate)
* ->private_lock (__free_pte->__set_page_dirty_buffers)
- * ->swap_list_lock
- * ->swap_device_lock (exclusive_swap_page, others)
- * ->mapping->tree_lock
+ * ->swap_lock (exclusive_swap_page, others)
+ * ->mapping->tree_lock
*
* ->i_sem
* ->i_mmap_lock (truncate->unmap_mapping_range)
@@ -86,7 +85,7 @@
* ->page_table_lock (anon_vma_prepare and various)
*
* ->page_table_lock
- * ->swap_device_lock (try_to_unmap_one)
+ * ->swap_lock (try_to_unmap_one)
* ->private_lock (try_to_unmap_one)
* ->tree_lock (try_to_unmap_one)
* ->zone.lru_lock (follow_page->mark_page_accessed)
@@ -1505,8 +1504,12 @@ repeat:
return -EINVAL;
page = filemap_getpage(file, pgoff, nonblock);
+
+ /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
+ * done in shmem_populate calling shmem_getpage */
if (!page && !nonblock)
return -ENOMEM;
+
if (page) {
err = install_page(mm, vma, addr, page, prot);
if (err) {
@@ -1514,6 +1517,9 @@ repeat:
return err;
}
} else {
+ /* No page was found just because we can't read it in now (being
+ * here implies nonblock != 0), but the page may exist, so set
+ * the PTE to fault it in later. */
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6bf720bc662..901ac523a1c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -360,8 +360,6 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
- if (! pte_none(*pte))
- hugetlb_clean_stale_pgtable(pte);
idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
diff --git a/mm/madvise.c b/mm/madvise.c
index c8c01a12fea..4454936f87d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -37,7 +37,7 @@ static long madvise_behavior(struct vm_area_struct * vma,
if (new_flags == vma->vm_flags) {
*prev = vma;
- goto success;
+ goto out;
}
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
@@ -62,6 +62,7 @@ static long madvise_behavior(struct vm_area_struct * vma,
goto out;
}
+success:
/*
* vm_flags is protected by the mmap_sem held in write mode.
*/
@@ -70,7 +71,6 @@ static long madvise_behavior(struct vm_area_struct * vma,
out:
if (error == -ENOMEM)
error = -EAGAIN;
-success:
return error;
}
@@ -237,8 +237,9 @@ asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
* - different from the way of handling in mlock etc.
*/
vma = find_vma_prev(current->mm, start, &prev);
- if (!vma && prev)
- vma = prev->vm_next;
+ if (vma && start > vma->vm_start)
+ prev = vma;
+
for (;;) {
/* Still start < end. */
error = -ENOMEM;
diff --git a/mm/memory.c b/mm/memory.c
index e046b7e4b53..788a6281034 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
+ /*
+ * Don't copy ptes where a page fault will fill them correctly.
+ * Fork becomes much lighter when there are big shared or private
+ * readonly mappings. The tradeoff is that copy_page_range is more
+ * efficient than faulting.
+ */
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+ if (!vma->anon_vma)
+ return 0;
+ }
+
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst_mm, src_mm, vma);
@@ -551,7 +562,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
page->index > details->last_index))
continue;
}
- ptent = ptep_get_and_clear(tlb->mm, addr, pte);
+ ptent = ptep_get_and_clear_full(tlb->mm, addr, pte,
+ tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
@@ -579,7 +591,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
continue;
if (!pte_file(ptent))
free_swap_and_cache(pte_to_swp_entry(ptent));
- pte_clear(tlb->mm, addr, pte);
+ pte_clear_full(tlb->mm, addr, pte, tlb->fullmm);
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap(pte - 1);
}
@@ -1944,7 +1956,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
* Fall back to the linear mapping if the fs does not support
* ->populate:
*/
- if (!vma->vm_ops || !vma->vm_ops->populate ||
+ if (!vma->vm_ops->populate ||
(write_access && !(vma->vm_flags & VM_SHARED))) {
pte_clear(mm, address, pte);
return do_no_page(mm, vma, address, write_access, pte, pmd);
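The copy_page_range() hunk above skips the page-table walk entirely when the VMA has no anon_vma and none of the flags that force copying, leaving later faults to refill the PTEs. A hedged sketch of just that decision; the flag values and struct here are invented for illustration, not the kernel's definitions:

/* Sketch only: flag values and types are invented for illustration. */
#include <stdbool.h>

#define VM_HUGETLB   0x0001
#define VM_NONLINEAR 0x0002
#define VM_RESERVED  0x0004

struct vma_like {
	unsigned long vm_flags;
	void *anon_vma;        /* NULL while the mapping has no anonymous pages */
};

/* Fork may skip copying this VMA's PTEs when later faults will refill
 * them anyway: no anonymous pages and none of the "must copy" flags. */
static bool fork_can_skip_pte_copy(const struct vma_like *vma)
{
	if (vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR | VM_RESERVED))
		return false;
	return vma->anon_vma == NULL;
}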
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b4eababc819..13492d66b7c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -664,10 +664,10 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
#endif
/* Return effective policy for a VMA */
-static struct mempolicy *
-get_vma_policy(struct vm_area_struct *vma, unsigned long addr)
+struct mempolicy *
+get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = current->mempolicy;
+ struct mempolicy *pol = task->mempolicy;
if (vma) {
if (vma->vm_ops && vma->vm_ops->get_policy)
@@ -786,7 +786,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or
struct page *
alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
cpuset_update_current_mems_allowed();
@@ -908,7 +908,7 @@ void __mpol_free(struct mempolicy *p)
/* Find first node suitable for an allocation */
int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
switch (pol->policy) {
case MPOL_DEFAULT:
@@ -928,7 +928,7 @@ int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
/* Find secondary valid nodes for an allocation */
int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
{
- struct mempolicy *pol = get_vma_policy(vma, addr);
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
switch (pol->policy) {
case MPOL_PREFERRED:
diff --git a/mm/mmap.c b/mm/mmap.c
index 404319477e7..12334aecf8a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -61,7 +61,7 @@ pgprot_t protection_map[16] = {
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
atomic_t vm_committed_space = ATOMIC_INIT(0);
/*
@@ -203,13 +203,6 @@ static void remove_vm_struct(struct vm_area_struct *vma)
kmem_cache_free(vm_area_cachep, vma);
}
-/*
- * sys_brk() for the most part doesn't need the global kernel
- * lock, except when an application is doing something nasty
- * like trying to un-brk an area that has already been mapped
- * to a regular file. in this case, the unmapping will need
- * to invoke file system routines that need the global lock.
- */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
unsigned long rlim, retval;
diff --git a/mm/mremap.c b/mm/mremap.c
index fc45dc9a617..a32fed454bd 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,6 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
if (dst) {
pte_t pte;
pte = ptep_clear_flush(vma, old_addr, src);
+ /* ZERO_PAGE can be dependent on virtual addr */
+ if (pfn_valid(pte_pfn(pte)) &&
+ pte_page(pte) == ZERO_PAGE(old_addr))
+ pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot));
set_pte_at(mm, new_addr, dst, pte);
} else
error = -ENOMEM;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1e56076672f..5ec8da12cfd 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -6,8 +6,8 @@
* for goading me into coding this file...
*
* The routines in this file are used to kill a process when
- * we're seriously out of memory. This gets called from kswapd()
- * in linux/mm/vmscan.c when we really run out of memory.
+ * we're seriously out of memory. This gets called from __alloc_pages()
+ * in mm/page_alloc.c when we really run out of memory.
*
* Since we won't call these routines often (on a well-configured
* machine) this file will double as a 'coding guide' and a signpost
@@ -20,13 +20,14 @@
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
+#include <linux/cpuset.h>
/* #define DEBUG */
/**
* oom_badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
- * @p: current uptime in seconds
+ * @uptime: current uptime in seconds
*
* The formula used is relatively simple and documented inline in the
* function. The main rationale is that we want to select a good task
@@ -57,9 +58,9 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* Processes which fork a lot of child processes are likely
- * a good choice. We add the vmsize of the childs if they
+ * a good choice. We add the vmsize of the children if they
* have an own mm. This prevents forking servers to flood the
- * machine with an endless amount of childs
+ * machine with an endless amount of children
*/
list_for_each(tsk, &p->children) {
struct task_struct *chld;
@@ -143,28 +144,36 @@ static struct task_struct * select_bad_process(void)
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
- do_each_thread(g, p)
+ do_each_thread(g, p) {
+ unsigned long points;
+ int releasing;
+
/* skip the init task with pid == 1 */
- if (p->pid > 1 && p->oomkilladj != OOM_DISABLE) {
- unsigned long points;
-
- /*
- * This is in the process of releasing memory so wait it
- * to finish before killing some other task by mistake.
- */
- if ((unlikely(test_tsk_thread_flag(p, TIF_MEMDIE)) || (p->flags & PF_EXITING)) &&
- !(p->flags & PF_DEAD))
- return ERR_PTR(-1UL);
- if (p->flags & PF_SWAPOFF)
- return p;
-
- points = badness(p, uptime.tv_sec);
- if (points > maxpoints || !chosen) {
- chosen = p;
- maxpoints = points;
- }
+ if (p->pid == 1)
+ continue;
+ if (p->oomkilladj == OOM_DISABLE)
+ continue;
+ /* If p's nodes don't overlap ours, it won't help to kill p. */
+ if (!cpuset_excl_nodes_overlap(p))
+ continue;
+
+ /*
+ * This is in the process of releasing memory so wait for it
+ * to finish before killing some other task by mistake.
+ */
+ releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
+ p->flags & PF_EXITING;
+ if (releasing && !(p->flags & PF_DEAD))
+ return ERR_PTR(-1UL);
+ if (p->flags & PF_SWAPOFF)
+ return p;
+
+ points = badness(p, uptime.tv_sec);
+ if (points > maxpoints || !chosen) {
+ chosen = p;
+ maxpoints = points;
}
- while_each_thread(g, p);
+ } while_each_thread(g, p);
return chosen;
}
@@ -189,7 +198,8 @@ static void __oom_kill_task(task_t *p)
return;
}
task_unlock(p);
- printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", p->pid, p->comm);
+ printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n",
+ p->pid, p->comm);
/*
* We give our sacrificial lamb high priority and access to
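select_bad_process() above now walks every thread, skips init, OOM-disabled tasks and tasks whose cpuset nodes do not overlap ours, and remembers the highest badness() score. A standalone sketch of that selection loop over a plain array; badness_stub() is a dummy stand-in (the real badness() also weighs run time, nice level, child VM sizes, capabilities and oomkilladj), and the struct fields are invented:

/* Sketch only: task fields and badness_stub() are stand-ins. */
#include <stddef.h>

#define OOM_DISABLE (-17)

struct task_like {
	int pid;
	int oomkilladj;
	int nodes_overlap;      /* 1 if its cpuset nodes overlap ours */
	unsigned long vm_size;  /* feeds the dummy score below */
};

static unsigned long badness_stub(const struct task_like *t)
{
	return t->vm_size;      /* the real badness() weighs much more */
}

static struct task_like *select_victim(struct task_like *tasks, size_t n)
{
	struct task_like *chosen = NULL;
	unsigned long maxpoints = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		struct task_like *p = &tasks[i];
		unsigned long points;

		if (p->pid == 1)                 /* never pick init */
			continue;
		if (p->oomkilladj == OOM_DISABLE)
			continue;
		if (!p->nodes_overlap)           /* killing it frees us nothing */
			continue;

		points = badness_stub(p);
		if (points > maxpoints || !chosen) {
			chosen = p;
			maxpoints = points;
		}
	}
	return chosen;
}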
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8d088371196..3974fd81d27 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -42,13 +42,13 @@
* MCD - HACK: Find somewhere to initialize this EARLY, or make this
* initializer cleaner
*/
-nodemask_t node_online_map = { { [0] = 1UL } };
+nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
-nodemask_t node_possible_map = NODE_MASK_ALL;
+nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
-struct pglist_data *pgdat_list;
-unsigned long totalram_pages;
-unsigned long totalhigh_pages;
+struct pglist_data *pgdat_list __read_mostly;
+unsigned long totalram_pages __read_mostly;
+unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;
/*
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(nr_swap_pages);
* Used by page_zone() to look up the address of the struct zone whose
* id is encoded in the upper bits of page->flags
*/
-struct zone *zone_table[1 << ZONETABLE_SHIFT];
+struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -329,7 +329,7 @@ static inline void free_pages_check(const char *function, struct page *page)
1 << PG_writeback )))
bad_page(function, page);
if (PageDirty(page))
- ClearPageDirty(page);
+ __ClearPageDirty(page);
}
/*
@@ -806,11 +806,14 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
classzone_idx = zone_idx(zones[0]);
restart:
- /* Go through the zonelist once, looking for a zone with enough free */
+ /*
+ * Go through the zonelist once, looking for a zone with enough free.
+ * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+ */
for (i = 0; (z = zones[i]) != NULL; i++) {
int do_reclaim = should_reclaim_zone(z, gfp_mask);
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
continue;
/*
@@ -845,6 +848,7 @@ zone_reclaim_retry:
*
* This is the last chance, in general, before the goto nopage.
* Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+ * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
*/
for (i = 0; (z = zones[i]) != NULL; i++) {
if (!zone_watermark_ok(z, order, z->pages_min,
@@ -852,7 +856,7 @@ zone_reclaim_retry:
gfp_mask & __GFP_HIGH))
continue;
- if (wait && !cpuset_zone_allowed(z))
+ if (wait && !cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -867,7 +871,7 @@ zone_reclaim_retry:
if (!(gfp_mask & __GFP_NOMEMALLOC)) {
/* go through the zonelist yet again, ignoring mins */
for (i = 0; (z = zones[i]) != NULL; i++) {
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
if (page)
@@ -903,7 +907,7 @@ rebalance:
gfp_mask & __GFP_HIGH))
continue;
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, gfp_mask))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -922,7 +926,7 @@ rebalance:
classzone_idx, 0, 0))
continue;
- if (!cpuset_zone_allowed(z))
+ if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -1130,19 +1134,20 @@ EXPORT_SYMBOL(nr_pagecache);
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif
-void __get_page_state(struct page_state *ret, int nr)
+void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
int cpu = 0;
memset(ret, 0, sizeof(*ret));
+ cpus_and(*cpumask, *cpumask, cpu_online_map);
- cpu = first_cpu(cpu_online_map);
+ cpu = first_cpu(*cpumask);
while (cpu < NR_CPUS) {
unsigned long *in, *out, off;
in = (unsigned long *)&per_cpu(page_states, cpu);
- cpu = next_cpu(cpu, cpu_online_map);
+ cpu = next_cpu(cpu, *cpumask);
if (cpu < NR_CPUS)
prefetch(&per_cpu(page_states, cpu));
@@ -1153,19 +1158,33 @@ void __get_page_state(struct page_state *ret, int nr)
}
}
+void get_page_state_node(struct page_state *ret, int node)
+{
+ int nr;
+ cpumask_t mask = node_to_cpumask(node);
+
+ nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+ nr /= sizeof(unsigned long);
+
+ __get_page_state(ret, nr+1, &mask);
+}
+
void get_page_state(struct page_state *ret)
{
int nr;
+ cpumask_t mask = CPU_MASK_ALL;
nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
nr /= sizeof(unsigned long);
- __get_page_state(ret, nr + 1);
+ __get_page_state(ret, nr + 1, &mask);
}
void get_full_page_state(struct page_state *ret)
{
- __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
+ cpumask_t mask = CPU_MASK_ALL;
+
+ __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
unsigned long __read_page_state(unsigned long offset)
@@ -1909,7 +1928,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone->nr_scan_inactive = 0;
zone->nr_active = 0;
zone->nr_inactive = 0;
- atomic_set(&zone->reclaim_in_progress, -1);
+ atomic_set(&zone->reclaim_in_progress, 0);
if (!size)
continue;
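__get_page_state() now takes a cpumask so that get_page_state_node() can sum per-CPU counters for one node's CPUs only. A userspace sketch of the same masked accumulation, with cpumask_t replaced by a plain unsigned long bitmask and made-up counter values:

/* Sketch only: counter values and the node-0 mask are made up. */
#include <stdio.h>

#define NR_CPUS_DEMO 8

static unsigned long per_cpu_pgalloc[NR_CPUS_DEMO] = { 3, 5, 0, 7, 2, 9, 1, 4 };

/* Sum only the CPUs whose bit is set in 'mask'. */
static unsigned long sum_masked(const unsigned long *counters, unsigned long mask)
{
	unsigned long total = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		if (mask & (1UL << cpu))
			total += counters[cpu];
	return total;
}

int main(void)
{
	unsigned long node0_mask = 0x0f;   /* pretend CPUs 0-3 sit on node 0 */

	printf("node 0: %lu\n", sum_masked(per_cpu_pgalloc, node0_mask));
	printf("all:    %lu\n", sum_masked(per_cpu_pgalloc, ~0UL));
	return 0;
}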
diff --git a/mm/readahead.c b/mm/readahead.c
index b840e7c6ea7..d0b50034e24 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -540,6 +540,7 @@ void handle_ra_miss(struct address_space *mapping,
{
ra->flags |= RA_FLAG_MISS;
ra->flags &= ~RA_FLAG_INCACHE;
+ ra->cache_hit = 0;
}
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index 08ac5c7fa91..450f5241b5a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -34,9 +34,8 @@
* anon_vma->lock
* mm->page_table_lock
* zone->lru_lock (in mark_page_accessed)
- * swap_list_lock (in swap_free etc's swap_info_get)
+ * swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
- * swap_device_lock (in swap_duplicate, swap_info_get)
* mapping->private_lock (in __set_page_dirty_buffers)
* inode_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
@@ -290,8 +289,6 @@ static int page_referenced_one(struct page *page,
pte_t *pte;
int referenced = 0;
- if (!get_mm_counter(mm, rss))
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
@@ -442,22 +439,19 @@ int page_referenced(struct page *page, int is_locked, int ignore_token)
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- struct anon_vma *anon_vma = vma->anon_vma;
- pgoff_t index;
-
BUG_ON(PageReserved(page));
- BUG_ON(!anon_vma);
inc_mm_counter(vma->vm_mm, anon_rss);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- index = (address - vma->vm_start) >> PAGE_SHIFT;
- index += vma->vm_pgoff;
- index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-
if (atomic_inc_and_test(&page->_mapcount)) {
- page->index = index;
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
inc_page_state(nr_mapped);
}
/* else checking page index and mapping is racy */
@@ -518,8 +512,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
pte_t pteval;
int ret = SWAP_AGAIN;
- if (!get_mm_counter(mm, rss))
- goto out;
address = vma_address(page, vma);
if (address == -EFAULT)
goto out;
@@ -532,6 +524,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
+ *
+ * Pages belonging to VM_RESERVED regions should not happen here.
*/
if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
ptep_clear_flush_young(vma, address, pte)) {
@@ -767,8 +761,7 @@ static int try_to_unmap_file(struct page *page)
if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
- while (get_mm_counter(vma->vm_mm, rss) &&
- cursor < max_nl_cursor &&
+ while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
try_to_unmap_cluster(cursor, &mapcount, vma);
cursor += CLUSTER_SIZE;
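page_add_anon_rmap() now derives page->index with linear_page_index() instead of the open-coded arithmetic shown in the removed lines. A small sketch of that calculation, assuming PAGE_CACHE_SHIFT == PAGE_SHIFT so the final right shift in the removed code is a no-op; the function and parameter names are illustrative:

/* Sketch only: mirrors the removed open-coded index arithmetic. */
static unsigned long linear_page_index_demo(unsigned long vm_start,
					    unsigned long vm_pgoff,
					    unsigned long address,
					    unsigned int page_shift)
{
	unsigned long index = (address - vm_start) >> page_shift;

	/* With PAGE_CACHE_SHIFT == PAGE_SHIFT the extra shift in the
	 * removed lines is a no-op, so the result is simply: */
	return index + vm_pgoff;
}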
diff --git a/mm/shmem.c b/mm/shmem.c
index 5a81b1ee4f7..db2c9e8d990 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -45,7 +45,6 @@
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
-#include <linux/xattr.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>
@@ -179,10 +178,9 @@ static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
-static struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;
-static struct backing_dev_info shmem_backing_dev_info = {
+static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
.unplug_io_fn = default_unplug_io_fn,
@@ -1195,6 +1193,7 @@ static int shmem_populate(struct vm_area_struct *vma,
err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
if (err)
return err;
+ /* Page may still be null, but only if nonblock was set. */
if (page) {
mark_page_accessed(page);
err = install_page(mm, vma, addr, page, prot);
@@ -1202,7 +1201,10 @@ static int shmem_populate(struct vm_area_struct *vma,
page_cache_release(page);
return err;
}
- } else if (nonblock) {
+ } else {
+ /* No page was found just because we can't read it in
+ * now (being here implies nonblock != 0), but the page
+ * may exist, so set the PTE to fault it in later. */
err = install_file_pte(mm, vma, addr, pgoff, prot);
if (err)
return err;
@@ -1296,7 +1298,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
switch (mode & S_IFMT) {
default:
- inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
@@ -1800,12 +1801,6 @@ static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *co
static struct inode_operations shmem_symlink_inline_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link_inline,
-#ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
-#endif
};
static struct inode_operations shmem_symlink_inode_operations = {
@@ -1813,12 +1808,6 @@ static struct inode_operations shmem_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link,
.put_link = shmem_put_link,
-#ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
-#endif
};
static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
@@ -1938,12 +1927,6 @@ static void shmem_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-#ifdef CONFIG_TMPFS_XATTR
-static struct xattr_handler *shmem_xattr_handlers[];
-#else
-#define shmem_xattr_handlers NULL
-#endif
-
static int shmem_fill_super(struct super_block *sb,
void *data, int silent)
{
@@ -1994,7 +1977,6 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
- sb->s_xattr = shmem_xattr_handlers;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
@@ -2083,12 +2065,6 @@ static struct file_operations shmem_file_operations = {
static struct inode_operations shmem_inode_operations = {
.truncate = shmem_truncate,
.setattr = shmem_notify_change,
-#ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
-#endif
};
static struct inode_operations shmem_dir_inode_operations = {
@@ -2102,21 +2078,6 @@ static struct inode_operations shmem_dir_inode_operations = {
.rmdir = shmem_rmdir,
.mknod = shmem_mknod,
.rename = shmem_rename,
-#ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
-#endif
-#endif
-};
-
-static struct inode_operations shmem_special_inode_operations = {
-#ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
#endif
};
@@ -2142,48 +2103,6 @@ static struct vm_operations_struct shmem_vm_ops = {
};
-#ifdef CONFIG_TMPFS_SECURITY
-
-static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
-{
- return security_inode_listsecurity(inode, list, list_len);
-}
-
-static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
-{
- if (strcmp(name, "") == 0)
- return -EINVAL;
- return security_inode_getsecurity(inode, name, buffer, size);
-}
-
-static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") == 0)
- return -EINVAL;
- return security_inode_setsecurity(inode, name, value, size, flags);
-}
-
-static struct xattr_handler shmem_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .list = shmem_xattr_security_list,
- .get = shmem_xattr_security_get,
- .set = shmem_xattr_security_set,
-};
-
-#endif /* CONFIG_TMPFS_SECURITY */
-
-#ifdef CONFIG_TMPFS_XATTR
-
-static struct xattr_handler *shmem_xattr_handlers[] = {
-#ifdef CONFIG_TMPFS_SECURITY
- &shmem_xattr_security_handler,
-#endif
- NULL
-};
-
-#endif /* CONFIG_TMPFS_XATTR */
-
static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
diff --git a/mm/slab.c b/mm/slab.c
index c9e706db463..d7c4443991f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -189,6 +189,7 @@
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/
+typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
@@ -600,7 +601,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
csizep++;
/*
- * Really subtile: The last entry with cs->cs_size==ULONG_MAX
+ * Really subtle: The last entry with cs->cs_size==ULONG_MAX
* has cs_{dma,}cachep==NULL. Thus no special case
* for large kmalloc calls required.
*/
@@ -2165,7 +2166,9 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl
objp = cache_alloc_refill(cachep, flags);
}
local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp, __builtin_return_address(0));
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp,
+ __builtin_return_address(0));
+ prefetchw(objp);
return objp;
}
@@ -2555,24 +2558,18 @@ void kmem_cache_free(kmem_cache_t *cachep, void *objp)
EXPORT_SYMBOL(kmem_cache_free);
/**
- * kcalloc - allocate memory for an array. The memory is set to zero.
- * @n: number of elements.
- * @size: element size.
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
* @flags: the type of memory to allocate.
*/
-void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, unsigned int __nocast flags)
{
- void *ret = NULL;
-
- if (n != 0 && size > INT_MAX / n)
- return ret;
-
- ret = kmalloc(n * size, flags);
+ void *ret = kmalloc(size, flags);
if (ret)
- memset(ret, 0, n * size);
+ memset(ret, 0, size);
return ret;
}
-EXPORT_SYMBOL(kcalloc);
+EXPORT_SYMBOL(kzalloc);
/**
* kfree - free previously allocated memory
@@ -3073,20 +3070,24 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
}
#endif
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ */
unsigned int ksize(const void *objp)
{
- kmem_cache_t *c;
- unsigned long flags;
- unsigned int size = 0;
-
- if (likely(objp != NULL)) {
- local_irq_save(flags);
- c = GET_PAGE_CACHE(virt_to_page(objp));
- size = kmem_cache_size(c);
- local_irq_restore(flags);
- }
+ if (unlikely(objp == NULL))
+ return 0;
- return size;
+ return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp)));
}
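The kcalloc() body removed above carried a multiply-overflow guard (n != 0 && size > INT_MAX / n) that the new kzalloc() no longer needs, since it takes a single size. A hedged userspace sketch of a calloc-style wrapper using the same guard:

/* Sketch only: a userspace calloc-style wrapper with the same guard. */
#include <limits.h>
#include <stdlib.h>
#include <string.h>

static void *zalloc_array(size_t n, size_t size)
{
	void *p;

	if (n != 0 && size > INT_MAX / n)   /* refuse overflowing products */
		return NULL;

	p = malloc(n * size);
	if (p)
		memset(p, 0, n * size);
	return p;
}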
diff --git a/mm/sparse.c b/mm/sparse.c
index b54e304df4a..347249a4917 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -6,6 +6,7 @@
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
+#include <linux/spinlock.h>
#include <asm/dma.h>
/*
@@ -13,9 +14,64 @@
*
* 1) mem_section - memory sections, mem_map's for valid memory
*/
-struct mem_section mem_section[NR_MEM_SECTIONS];
+#ifdef CONFIG_SPARSEMEM_EXTREME
+struct mem_section *mem_section[NR_SECTION_ROOTS]
+ ____cacheline_maxaligned_in_smp;
+#else
+struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
+ ____cacheline_maxaligned_in_smp;
+#endif
EXPORT_SYMBOL(mem_section);
+#ifdef CONFIG_SPARSEMEM_EXTREME
+static struct mem_section *sparse_index_alloc(int nid)
+{
+ struct mem_section *section = NULL;
+ unsigned long array_size = SECTIONS_PER_ROOT *
+ sizeof(struct mem_section);
+
+ section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+
+ if (section)
+ memset(section, 0, array_size);
+
+ return section;
+}
+
+static int sparse_index_init(unsigned long section_nr, int nid)
+{
+ static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
+ unsigned long root = SECTION_NR_TO_ROOT(section_nr);
+ struct mem_section *section;
+ int ret = 0;
+
+ if (mem_section[root])
+ return -EEXIST;
+
+ section = sparse_index_alloc(nid);
+ /*
+ * This lock keeps two different sections from
+ * reallocating for the same index
+ */
+ spin_lock(&index_init_lock);
+
+ if (mem_section[root]) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ mem_section[root] = section;
+out:
+ spin_unlock(&index_init_lock);
+ return ret;
+}
+#else /* !SPARSEMEM_EXTREME */
+static inline int sparse_index_init(unsigned long section_nr, int nid)
+{
+ return 0;
+}
+#endif
+
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -24,8 +80,13 @@ void memory_present(int nid, unsigned long start, unsigned long end)
start &= PAGE_SECTION_MASK;
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
unsigned long section = pfn_to_section_nr(pfn);
- if (!mem_section[section].section_mem_map)
- mem_section[section].section_mem_map = SECTION_MARKED_PRESENT;
+ struct mem_section *ms;
+
+ sparse_index_init(section, nid);
+
+ ms = __nr_to_section(section);
+ if (!ms->section_mem_map)
+ ms->section_mem_map = SECTION_MARKED_PRESENT;
}
}
@@ -85,6 +146,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
+ struct mem_section *ms = __nr_to_section(pnum);
map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
if (map)
@@ -96,7 +158,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
return map;
printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
- mem_section[pnum].section_mem_map = 0;
+ ms->section_mem_map = 0;
return NULL;
}
@@ -114,8 +176,9 @@ void sparse_init(void)
continue;
map = sparse_early_mem_map_alloc(pnum);
- if (map)
- sparse_init_one_section(&mem_section[pnum], pnum, map);
+ if (!map)
+ continue;
+ sparse_init_one_section(__nr_to_section(pnum), pnum, map);
}
}
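sparse_index_init() above allocates the new root outside the lock, then takes index_init_lock and rechecks mem_section[root], so two CPUs racing to initialise the same root cannot both install one. A pthread-based sketch of that allocate-then-recheck pattern; calloc stands in for the node-aware bootmem allocator and all names are invented:

/* Sketch only: pthreads and calloc stand in for the kernel primitives. */
#include <pthread.h>
#include <stdlib.h>

#define NROOTS 16

static void *roots[NROOTS];
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 if we installed the root, -1 if it already existed. */
static int root_init(int idx, size_t bytes)
{
	void *mem;
	int ret = 0;

	if (roots[idx])
		return -1;

	mem = calloc(1, bytes);          /* allocate before taking the lock */

	pthread_mutex_lock(&init_lock);
	if (roots[idx]) {                /* lost the race to another thread */
		ret = -1;
		free(mem);               /* the bootmem original just leaks this */
	} else {
		roots[idx] = mem;
	}
	pthread_mutex_unlock(&init_lock);
	return ret;
}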
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4f251775ef9..029e56eb5e7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -124,6 +124,7 @@ void __delete_from_swap_cache(struct page *page)
BUG_ON(!PageLocked(page));
BUG_ON(!PageSwapCache(page));
BUG_ON(PageWriteback(page));
+ BUG_ON(PagePrivate(page));
radix_tree_delete(&swapper_space.page_tree, page->private);
page->private = 0;
@@ -196,11 +197,6 @@ void delete_from_swap_cache(struct page *page)
{
swp_entry_t entry;
- BUG_ON(!PageSwapCache(page));
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- BUG_ON(PagePrivate(page));
-
entry.val = page->private;
write_lock_irq(&swapper_space.tree_lock);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 60cd24a5520..4b6e8bf986b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,7 +31,7 @@
#include <asm/tlbflush.h>
#include <linux/swapops.h>
-DEFINE_SPINLOCK(swaplock);
+DEFINE_SPINLOCK(swap_lock);
unsigned int nr_swapfiles;
long total_swap_pages;
static int swap_overflow;
@@ -51,13 +51,11 @@ static DECLARE_MUTEX(swapon_sem);
/*
* We need this because the bdev->unplug_fn can sleep and we cannot
- * hold swap_list_lock while calling the unplug_fn. And swap_list_lock
+ * hold swap_lock while calling the unplug_fn. And swap_lock
* cannot be turned into a semaphore.
*/
static DECLARE_RWSEM(swap_unplug_sem);
-#define SWAPFILE_CLUSTER 256
-
void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
swp_entry_t entry;
@@ -84,116 +82,135 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
up_read(&swap_unplug_sem);
}
-static inline int scan_swap_map(struct swap_info_struct *si)
+#define SWAPFILE_CLUSTER 256
+#define LATENCY_LIMIT 256
+
+static inline unsigned long scan_swap_map(struct swap_info_struct *si)
{
- unsigned long offset;
+ unsigned long offset, last_in_cluster;
+ int latency_ration = LATENCY_LIMIT;
+
/*
- * We try to cluster swap pages by allocating them
- * sequentially in swap. Once we've allocated
- * SWAPFILE_CLUSTER pages this way, however, we resort to
- * first-free allocation, starting a new cluster. This
- * prevents us from scattering swap pages all over the entire
- * swap partition, so that we reduce overall disk seek times
- * between swap pages. -- sct */
- if (si->cluster_nr) {
- while (si->cluster_next <= si->highest_bit) {
- offset = si->cluster_next++;
+ * We try to cluster swap pages by allocating them sequentially
+ * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
+ * way, however, we resort to first-free allocation, starting
+ * a new cluster. This prevents us from scattering swap pages
+ * all over the entire swap partition, so that we reduce
+ * overall disk seek times between swap pages. -- sct
+ * But we do now try to find an empty cluster. -Andrea
+ */
+
+ si->flags += SWP_SCANNING;
+ if (unlikely(!si->cluster_nr)) {
+ si->cluster_nr = SWAPFILE_CLUSTER - 1;
+ if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER)
+ goto lowest;
+ spin_unlock(&swap_lock);
+
+ offset = si->lowest_bit;
+ last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
+
+ /* Locate the first empty (unaligned) cluster */
+ for (; last_in_cluster <= si->highest_bit; offset++) {
if (si->swap_map[offset])
- continue;
- si->cluster_nr--;
- goto got_page;
- }
- }
- si->cluster_nr = SWAPFILE_CLUSTER;
-
- /* try to find an empty (even not aligned) cluster. */
- offset = si->lowest_bit;
- check_next_cluster:
- if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
- {
- unsigned long nr;
- for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
- if (si->swap_map[nr])
- {
- offset = nr+1;
- goto check_next_cluster;
+ last_in_cluster = offset + SWAPFILE_CLUSTER;
+ else if (offset == last_in_cluster) {
+ spin_lock(&swap_lock);
+ si->cluster_next = offset-SWAPFILE_CLUSTER-1;
+ goto cluster;
}
- /* We found a completly empty cluster, so start
- * using it.
- */
- goto got_page;
+ if (unlikely(--latency_ration < 0)) {
+ cond_resched();
+ latency_ration = LATENCY_LIMIT;
+ }
+ }
+ spin_lock(&swap_lock);
+ goto lowest;
}
- /* No luck, so now go finegrined as usual. -Andrea */
- for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
- if (si->swap_map[offset])
- continue;
- si->lowest_bit = offset+1;
- got_page:
+
+ si->cluster_nr--;
+cluster:
+ offset = si->cluster_next;
+ if (offset > si->highest_bit)
+lowest: offset = si->lowest_bit;
+checks: if (!(si->flags & SWP_WRITEOK))
+ goto no_page;
+ if (!si->highest_bit)
+ goto no_page;
+ if (!si->swap_map[offset]) {
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
- if (si->lowest_bit > si->highest_bit) {
+ si->inuse_pages++;
+ if (si->inuse_pages == si->pages) {
si->lowest_bit = si->max;
si->highest_bit = 0;
}
si->swap_map[offset] = 1;
- si->inuse_pages++;
- nr_swap_pages--;
- si->cluster_next = offset+1;
+ si->cluster_next = offset + 1;
+ si->flags -= SWP_SCANNING;
return offset;
}
- si->lowest_bit = si->max;
- si->highest_bit = 0;
+
+ spin_unlock(&swap_lock);
+ while (++offset <= si->highest_bit) {
+ if (!si->swap_map[offset]) {
+ spin_lock(&swap_lock);
+ goto checks;
+ }
+ if (unlikely(--latency_ration < 0)) {
+ cond_resched();
+ latency_ration = LATENCY_LIMIT;
+ }
+ }
+ spin_lock(&swap_lock);
+ goto lowest;
+
+no_page:
+ si->flags -= SWP_SCANNING;
return 0;
}
swp_entry_t get_swap_page(void)
{
- struct swap_info_struct * p;
- unsigned long offset;
- swp_entry_t entry;
- int type, wrapped = 0;
+ struct swap_info_struct *si;
+ pgoff_t offset;
+ int type, next;
+ int wrapped = 0;
- entry.val = 0; /* Out of memory */
- swap_list_lock();
- type = swap_list.next;
- if (type < 0)
- goto out;
+ spin_lock(&swap_lock);
if (nr_swap_pages <= 0)
- goto out;
+ goto noswap;
+ nr_swap_pages--;
+
+ for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
+ si = swap_info + type;
+ next = si->next;
+ if (next < 0 ||
+ (!wrapped && si->prio != swap_info[next].prio)) {
+ next = swap_list.head;
+ wrapped++;
+ }
- while (1) {
- p = &swap_info[type];
- if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
- swap_device_lock(p);
- offset = scan_swap_map(p);
- swap_device_unlock(p);
- if (offset) {
- entry = swp_entry(type,offset);
- type = swap_info[type].next;
- if (type < 0 ||
- p->prio != swap_info[type].prio) {
- swap_list.next = swap_list.head;
- } else {
- swap_list.next = type;
- }
- goto out;
- }
+ if (!si->highest_bit)
+ continue;
+ if (!(si->flags & SWP_WRITEOK))
+ continue;
+
+ swap_list.next = next;
+ offset = scan_swap_map(si);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
}
- type = p->next;
- if (!wrapped) {
- if (type < 0 || p->prio != swap_info[type].prio) {
- type = swap_list.head;
- wrapped = 1;
- }
- } else
- if (type < 0)
- goto out; /* out of swap space */
+ next = swap_list.next;
}
-out:
- swap_list_unlock();
- return entry;
+
+ nr_swap_pages++;
+noswap:
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
}
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
@@ -214,10 +231,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry)
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
- swap_list_lock();
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = type;
- swap_device_lock(p);
+ spin_lock(&swap_lock);
return p;
bad_free:
@@ -235,12 +249,6 @@ out:
return NULL;
}
-static void swap_info_put(struct swap_info_struct * p)
-{
- swap_device_unlock(p);
- swap_list_unlock();
-}
-
static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
int count = p->swap_map[offset];
@@ -253,6 +261,8 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
+ if (p->prio > swap_info[swap_list.next].prio)
+ swap_list.next = p - swap_info;
nr_swap_pages++;
p->inuse_pages--;
}
@@ -271,7 +281,7 @@ void swap_free(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, swp_offset(entry));
- swap_info_put(p);
+ spin_unlock(&swap_lock);
}
}
@@ -289,7 +299,7 @@ static inline int page_swapcount(struct page *page)
if (p) {
/* Subtract the 1 for the swap cache itself */
count = p->swap_map[swp_offset(entry)] - 1;
- swap_info_put(p);
+ spin_unlock(&swap_lock);
}
return count;
}
@@ -346,7 +356,7 @@ int remove_exclusive_swap_page(struct page *page)
}
write_unlock_irq(&swapper_space.tree_lock);
}
- swap_info_put(p);
+ spin_unlock(&swap_lock);
if (retval) {
swap_free(entry);
@@ -369,7 +379,7 @@ void free_swap_and_cache(swp_entry_t entry)
if (p) {
if (swap_entry_free(p, swp_offset(entry)) == 1)
page = find_trylock_page(&swapper_space, entry.val);
- swap_info_put(p);
+ spin_unlock(&swap_lock);
}
if (page) {
int one_user;
@@ -531,17 +541,18 @@ static int unuse_mm(struct mm_struct *mm,
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
*/
-static int find_next_to_unuse(struct swap_info_struct *si, int prev)
+static unsigned int find_next_to_unuse(struct swap_info_struct *si,
+ unsigned int prev)
{
- int max = si->max;
- int i = prev;
+ unsigned int max = si->max;
+ unsigned int i = prev;
int count;
/*
- * No need for swap_device_lock(si) here: we're just looking
+ * No need for swap_lock here: we're just looking
* for whether an entry is in use, not modifying it; false
* hits are okay, and sys_swapoff() has already prevented new
- * allocations from this area (while holding swap_list_lock()).
+ * allocations from this area (while holding swap_lock).
*/
for (;;) {
if (++i >= max) {
@@ -577,7 +588,7 @@ static int try_to_unuse(unsigned int type)
unsigned short swcount;
struct page *page;
swp_entry_t entry;
- int i = 0;
+ unsigned int i = 0;
int retval = 0;
int reset_overflow = 0;
int shmem;
@@ -731,9 +742,9 @@ static int try_to_unuse(unsigned int type)
* report them; but do report if we reset SWAP_MAP_MAX.
*/
if (*swap_map == SWAP_MAP_MAX) {
- swap_device_lock(si);
+ spin_lock(&swap_lock);
*swap_map = 1;
- swap_device_unlock(si);
+ spin_unlock(&swap_lock);
reset_overflow = 1;
}
@@ -797,9 +808,9 @@ static int try_to_unuse(unsigned int type)
}
/*
- * After a successful try_to_unuse, if no swap is now in use, we know we
- * can empty the mmlist. swap_list_lock must be held on entry and exit.
- * Note that mmlist_lock nests inside swap_list_lock, and an mm must be
+ * After a successful try_to_unuse, if no swap is now in use, we know
+ * we can empty the mmlist. swap_lock must be held on entry and exit.
+ * Note that mmlist_lock nests inside swap_lock, and an mm must be
* added to the mmlist just after page_duplicate - before would be racy.
*/
static void drain_mmlist(void)
@@ -832,9 +843,9 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
offset < (se->start_page + se->nr_pages)) {
return se->start_block + (offset - se->start_page);
}
- lh = se->list.prev;
+ lh = se->list.next;
if (lh == &sis->extent_list)
- lh = lh->prev;
+ lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
@@ -854,15 +865,13 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
list_del(&se->list);
kfree(se);
}
- sis->nr_extents = 0;
}
/*
* Add a block range (and the corresponding page range) into this swapdev's
- * extent list. The extent list is kept sorted in block order.
+ * extent list. The extent list is kept sorted in page order.
*
- * This function rather assumes that it is called in ascending sector_t order.
- * It doesn't look for extent coalescing opportunities.
+ * This function rather assumes that it is called in ascending page order.
*/
static int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
@@ -872,16 +881,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
struct swap_extent *new_se;
struct list_head *lh;
- lh = sis->extent_list.next; /* The highest-addressed block */
- while (lh != &sis->extent_list) {
+ lh = sis->extent_list.prev; /* The highest page extent */
+ if (lh != &sis->extent_list) {
se = list_entry(lh, struct swap_extent, list);
- if (se->start_block + se->nr_pages == start_block &&
- se->start_page + se->nr_pages == start_page) {
+ BUG_ON(se->start_page + se->nr_pages != start_page);
+ if (se->start_block + se->nr_pages == start_block) {
/* Merge it */
se->nr_pages += nr_pages;
return 0;
}
- lh = lh->next;
}
/*
@@ -894,16 +902,8 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
- lh = sis->extent_list.prev; /* The lowest block */
- while (lh != &sis->extent_list) {
- se = list_entry(lh, struct swap_extent, list);
- if (se->start_block > start_block)
- break;
- lh = lh->prev;
- }
- list_add_tail(&new_se->list, lh);
- sis->nr_extents++;
- return 0;
+ list_add_tail(&new_se->list, &sis->extent_list);
+ return 1;
}
/*
@@ -926,7 +926,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
* requirements, they are simply tossed out - we will never use those blocks
* for swapping.
*
- * For S_ISREG swapfiles we hold i_sem across the life of the swapon. This
+ * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
* prevents root from shooting her foot off by ftruncating an in-use swapfile,
* which will scribble on the fs.
*
@@ -937,7 +937,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
* This is extremely effective. The average number of iterations in
* map_swap_page() has been measured at about 0.3 per page. - akpm.
*/
-static int setup_swap_extents(struct swap_info_struct *sis)
+static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
struct inode *inode;
unsigned blocks_per_page;
@@ -945,11 +945,15 @@ static int setup_swap_extents(struct swap_info_struct *sis)
unsigned blkbits;
sector_t probe_block;
sector_t last_block;
+ sector_t lowest_block = -1;
+ sector_t highest_block = 0;
+ int nr_extents = 0;
int ret;
inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
+ *span = sis->pages;
goto done;
}
@@ -994,22 +998,32 @@ static int setup_swap_extents(struct swap_info_struct *sis)
}
}
+ first_block >>= (PAGE_SHIFT - blkbits);
+ if (page_no) { /* exclude the header page */
+ if (first_block < lowest_block)
+ lowest_block = first_block;
+ if (first_block > highest_block)
+ highest_block = first_block;
+ }
+
/*
* We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
*/
- ret = add_swap_extent(sis, page_no, 1,
- first_block >> (PAGE_SHIFT - blkbits));
- if (ret)
+ ret = add_swap_extent(sis, page_no, 1, first_block);
+ if (ret < 0)
goto out;
+ nr_extents += ret;
page_no++;
probe_block += blocks_per_page;
reprobe:
continue;
}
- ret = 0;
+ ret = nr_extents;
+ *span = 1 + highest_block - lowest_block;
if (page_no == 0)
- ret = -EINVAL;
+ page_no = 1; /* force Empty message */
sis->max = page_no;
+ sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
done:
sis->curr_swap_extent = list_entry(sis->extent_list.prev,
@@ -1069,7 +1083,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
mapping = victim->f_mapping;
prev = -1;
- swap_list_lock();
+ spin_lock(&swap_lock);
for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
p = swap_info + type;
if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
@@ -1080,14 +1094,14 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
}
if (type < 0) {
err = -EINVAL;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
if (!security_vm_enough_memory(p->pages))
vm_unacct_memory(p->pages);
else {
err = -ENOMEM;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
if (prev < 0) {
@@ -1102,18 +1116,15 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
nr_swap_pages -= p->pages;
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
+
current->flags |= PF_SWAPOFF;
err = try_to_unuse(type);
current->flags &= ~PF_SWAPOFF;
- /* wait for any unplug function to finish */
- down_write(&swap_unplug_sem);
- up_write(&swap_unplug_sem);
-
if (err) {
/* re-insert swap space back into swap_list */
- swap_list_lock();
+ spin_lock(&swap_lock);
for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
if (p->prio >= swap_info[i].prio)
break;
@@ -1125,22 +1136,35 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out_dput;
}
+
+ /* wait for any unplug function to finish */
+ down_write(&swap_unplug_sem);
+ up_write(&swap_unplug_sem);
+
+ destroy_swap_extents(p);
down(&swapon_sem);
- swap_list_lock();
+ spin_lock(&swap_lock);
drain_mmlist();
- swap_device_lock(p);
+
+ /* wait for anyone still in scan_swap_map */
+ p->highest_bit = 0; /* cuts scans short */
+ while (p->flags >= SWP_SCANNING) {
+ spin_unlock(&swap_lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ spin_lock(&swap_lock);
+ }
+
swap_file = p->swap_file;
p->swap_file = NULL;
p->max = 0;
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
- destroy_swap_extents(p);
- swap_device_unlock(p);
- swap_list_unlock();
+ spin_unlock(&swap_lock);
up(&swapon_sem);
vfree(swap_map);
inode = mapping->host;
@@ -1213,7 +1237,7 @@ static int swap_show(struct seq_file *swap, void *v)
file = ptr->swap_file;
len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
- seq_printf(swap, "%*s%s\t%d\t%ld\t%d\n",
+ seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_dentry->d_inode->i_mode) ?
"partition" : "file\t",
@@ -1272,7 +1296,9 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
static int least_priority;
union swap_header *swap_header = NULL;
int swap_header_version;
- int nr_good_pages = 0;
+ unsigned int nr_good_pages = 0;
+ int nr_extents = 0;
+ sector_t span;
unsigned long maxpages = 1;
int swapfilesize;
unsigned short *swap_map;
@@ -1282,7 +1308,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- swap_list_lock();
+ spin_lock(&swap_lock);
p = swap_info;
for (type = 0 ; type < nr_swapfiles ; type++,p++)
if (!(p->flags & SWP_USED))
@@ -1301,14 +1327,13 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
* swp_entry_t or the architecture definition of a swap pte.
*/
if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
- swap_list_unlock();
+ spin_unlock(&swap_lock);
goto out;
}
if (type >= nr_swapfiles)
nr_swapfiles = type+1;
INIT_LIST_HEAD(&p->extent_list);
p->flags = SWP_USED;
- p->nr_extents = 0;
p->swap_file = NULL;
p->old_block_size = 0;
p->swap_map = NULL;
@@ -1316,7 +1341,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
p->highest_bit = 0;
p->cluster_nr = 0;
p->inuse_pages = 0;
- spin_lock_init(&p->sdev_lock);
p->next = -1;
if (swap_flags & SWAP_FLAG_PREFER) {
p->prio =
@@ -1324,7 +1348,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
} else {
p->prio = --least_priority;
}
- swap_list_unlock();
+ spin_unlock(&swap_lock);
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
@@ -1426,6 +1450,8 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
}
p->lowest_bit = 1;
+ p->cluster_next = 1;
+
/*
* Find out how many pages are allowed for a single swap
* device. There are two limiting factors: 1) the number of
@@ -1446,6 +1472,10 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
p->highest_bit = maxpages - 1;
error = -EINVAL;
+ if (!maxpages)
+ goto bad_swap;
+ if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
+ goto bad_swap;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
@@ -1470,35 +1500,40 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
if (error)
goto bad_swap;
}
-
+
if (swapfilesize && maxpages > swapfilesize) {
printk(KERN_WARNING
"Swap area shorter than signature indicates\n");
error = -EINVAL;
goto bad_swap;
}
+ if (nr_good_pages) {
+ p->swap_map[0] = SWAP_MAP_BAD;
+ p->max = maxpages;
+ p->pages = nr_good_pages;
+ nr_extents = setup_swap_extents(p, &span);
+ if (nr_extents < 0) {
+ error = nr_extents;
+ goto bad_swap;
+ }
+ nr_good_pages = p->pages;
+ }
if (!nr_good_pages) {
printk(KERN_WARNING "Empty swap-file\n");
error = -EINVAL;
goto bad_swap;
}
- p->swap_map[0] = SWAP_MAP_BAD;
- p->max = maxpages;
- p->pages = nr_good_pages;
-
- error = setup_swap_extents(p);
- if (error)
- goto bad_swap;
down(&swapon_sem);
- swap_list_lock();
- swap_device_lock(p);
+ spin_lock(&swap_lock);
p->flags = SWP_ACTIVE;
nr_swap_pages += nr_good_pages;
total_swap_pages += nr_good_pages;
- printk(KERN_INFO "Adding %dk swap on %s. Priority:%d extents:%d\n",
- nr_good_pages<<(PAGE_SHIFT-10), name,
- p->prio, p->nr_extents);
+
+ printk(KERN_INFO "Adding %uk swap on %s. "
+ "Priority:%d extents:%d across:%lluk\n",
+ nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
+ nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));
/* insert swap space into swap_list: */
prev = -1;
@@ -1514,8 +1549,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
} else {
swap_info[prev].next = p - swap_info;
}
- swap_device_unlock(p);
- swap_list_unlock();
+ spin_unlock(&swap_lock);
up(&swapon_sem);
error = 0;
goto out;
@@ -1524,16 +1558,16 @@ bad_swap:
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
}
+ destroy_swap_extents(p);
bad_swap_2:
- swap_list_lock();
+ spin_lock(&swap_lock);
swap_map = p->swap_map;
p->swap_file = NULL;
p->swap_map = NULL;
p->flags = 0;
if (!(swap_flags & SWAP_FLAG_PREFER))
++least_priority;
- swap_list_unlock();
- destroy_swap_extents(p);
+ spin_unlock(&swap_lock);
vfree(swap_map);
if (swap_file)
filp_close(swap_file, NULL);
@@ -1557,7 +1591,7 @@ void si_swapinfo(struct sysinfo *val)
unsigned int i;
unsigned long nr_to_be_unused = 0;
- swap_list_lock();
+ spin_lock(&swap_lock);
for (i = 0; i < nr_swapfiles; i++) {
if (!(swap_info[i].flags & SWP_USED) ||
(swap_info[i].flags & SWP_WRITEOK))
@@ -1566,7 +1600,7 @@ void si_swapinfo(struct sysinfo *val)
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
- swap_list_unlock();
+ spin_unlock(&swap_lock);
}
/*
@@ -1587,7 +1621,7 @@ int swap_duplicate(swp_entry_t entry)
p = type + swap_info;
offset = swp_offset(entry);
- swap_device_lock(p);
+ spin_lock(&swap_lock);
if (offset < p->max && p->swap_map[offset]) {
if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
p->swap_map[offset]++;
@@ -1599,7 +1633,7 @@ int swap_duplicate(swp_entry_t entry)
result = 1;
}
}
- swap_device_unlock(p);
+ spin_unlock(&swap_lock);
out:
return result;
@@ -1615,7 +1649,7 @@ get_swap_info_struct(unsigned type)
}
/*
- * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * swap_lock prevents swap_map being freed. Don't grab an extra
* reference on the swaphandle, it doesn't matter if it becomes unused.
*/
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
@@ -1631,7 +1665,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
toff++, i--;
*offset = toff;
- swap_device_lock(swapdev);
+ spin_lock(&swap_lock);
do {
/* Don't read-ahead past the end of the swap area */
if (toff >= swapdev->max)
@@ -1644,6 +1678,6 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
toff++;
ret++;
} while (--i);
- swap_device_unlock(swapdev);
+ spin_unlock(&swap_lock);
return ret;
}
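The rewritten scan_swap_map() above first hunts for a run of SWAPFILE_CLUSTER free slots and only then falls back to first-free allocation, so consecutively allocated swap pages tend to stay contiguous on disk. A standalone simulation of that two-pass search over a byte map; swap_lock handling, SWP_SCANNING and the LATENCY_LIMIT rescheduling are deliberately left out, and CLUSTER is a stand-in value:

/* Sketch only: a two-pass scan over a byte map of used/free slots. */
#include <stddef.h>

#define CLUSTER 8

/* Return the start of the first fully free CLUSTER-sized run, else the
 * first free slot anywhere, else -1 when the map is full. */
static long pick_swap_slot(const unsigned char *map, size_t nslots)
{
	size_t offset = 0, last_in_cluster = CLUSTER - 1;

	/* Pass 1: look for an (unaligned) run of CLUSTER free slots. */
	for (; last_in_cluster < nslots; offset++) {
		if (map[offset])
			last_in_cluster = offset + CLUSTER;  /* run broken */
		else if (offset == last_in_cluster)
			return (long)(offset - (CLUSTER - 1));
	}

	/* Pass 2: fall back to first-free allocation. */
	for (offset = 0; offset < nslots; offset++)
		if (!map[offset])
			return (long)offset;
	return -1;
}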
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8ff16a1eee6..67b358e57ef 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -158,8 +158,6 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
return err;
}
-#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
-
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cfffe5098d5..a740778f688 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
unsigned long nr_active;
unsigned long nr_inactive;
+ atomic_inc(&zone->reclaim_in_progress);
+
/*
* Add one to `nr_to_scan' just to make sure that the kernel will
* slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
}
throttle_vm_writeout();
+
+ atomic_dec(&zone->reclaim_in_progress);
}
/*
@@ -890,7 +894,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
if (zone->present_pages == 0)
continue;
- if (!cpuset_zone_allowed(zone))
+ if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
zone->temp_priority = sc->priority;
@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
- atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, sc);
- atomic_dec(&zone->reclaim_in_progress);
}
}
@@ -938,7 +940,7 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
- if (!cpuset_zone_allowed(zone))
+ if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
zone->temp_priority = DEF_PRIORITY;
@@ -984,7 +986,7 @@ out:
for (i = 0; zones[i] != 0; i++) {
struct zone *zone = zones[i];
- if (!cpuset_zone_allowed(zone))
+ if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
continue;
zone->prev_priority = zone->temp_priority;
@@ -1254,7 +1256,7 @@ void wakeup_kswapd(struct zone *zone, int order)
return;
if (pgdat->kswapd_max_order < order)
pgdat->kswapd_max_order = order;
- if (!cpuset_zone_allowed(zone))
+ if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
return;
if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
return;
@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
sc.swap_cluster_max = SWAP_CLUSTER_MAX;
/* Don't reclaim the zone if there are other reclaimers active */
- if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+ if (atomic_read(&zone->reclaim_in_progress) > 0)
goto out;
shrink_zone(zone, &sc);
total_reclaimed = sc.nr_reclaimed;
out:
- atomic_dec(&zone->reclaim_in_progress);
return total_reclaimed;
}
@@ -1375,6 +1376,9 @@ asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
struct zone *z;
int i;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
if (node >= MAX_NUMNODES || !node_online(node))
return -EINVAL;