Diffstat (limited to 'mm')
-rw-r--r--  mm/dmapool.c        |   2
-rw-r--r--  mm/kmemleak.c       | 197
-rw-r--r--  mm/memory.c         |  26
-rw-r--r--  mm/nommu.c          |  12
-rw-r--r--  mm/page-writeback.c |   5
-rw-r--r--  mm/page_alloc.c     |  17
-rw-r--r--  mm/percpu.c         |  24
-rw-r--r--  mm/shmem.c          |   6
-rw-r--r--  mm/shmem_acl.c      |  29
-rw-r--r--  mm/slub.c           |  10
10 files changed, 132 insertions, 196 deletions
diff --git a/mm/dmapool.c b/mm/dmapool.c
index b1f0885dda2..3df063706f5 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -86,10 +86,12 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
unsigned pages = 0;
unsigned blocks = 0;
+ spin_lock_irq(&pool->lock);
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
+ spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
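
The hunk above takes pool->lock around the walk of pool->page_list so that show_pools() cannot race against pages being added to or freed from the list. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the kernel spinlock (all names here are illustrative, not from the patch):

#include <pthread.h>
#include <stdio.h>

struct page_stats {
	int in_use;
	struct page_stats *next;
};

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page_stats *page_list;	/* mutated by other threads */

/* Traverse under the same lock the writers take, so no node can be
 * freed out from under the walk. */
static void show_pool(void)
{
	struct page_stats *p;
	int pages = 0, blocks = 0;

	pthread_mutex_lock(&pool_lock);
	for (p = page_list; p; p = p->next) {
		pages++;
		blocks += p->in_use;
	}
	pthread_mutex_unlock(&pool_lock);

	printf("%d pages, %d blocks in use\n", pages, blocks);
}
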
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c96f2c8700a..e766e1da09d 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
- * added to the gray_list and therefore cannot be freed
- * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
- * file together with modifications to the memory scanning parameters
- * including the scan_thread pointer
+ * added to the gray_list and therefore cannot be freed. This mutex also
+ * prevents multiple users of the "kmemleak" debugfs file together with
+ * modifications to the memory scanning parameters including the scan_thread
+ * pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
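
The use_count referred to here is a plain reference count: get_object() pins a kmemleak_object so it cannot be freed while the scanner is looking at it, and put_object() drops the pin, freeing the object when the count reaches zero. A rough userspace analogue using C11 atomics (a sketch of the idea, not the kernel code):

#include <stdatomic.h>
#include <stdlib.h>

struct object {
	atomic_int use_count;	/* starts at 1 for the creator */
	/* ... payload ... */
};

static void get_object(struct object *obj)
{
	atomic_fetch_add(&obj->use_count, 1);
}

/* The object is freed only when the last reference is dropped. */
static void put_object(struct object *obj)
{
	if (atomic_fetch_sub(&obj->use_count, 1) == 1)
		free(obj);
}
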
@@ -105,7 +105,6 @@
#define MAX_TRACE 16 /* stack trace length */
#define REPORTS_NR 50 /* maximum number of reported leaks */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
-#define MSECS_SCAN_YIELD 10 /* CPU yielding period */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
@@ -186,19 +185,16 @@ static atomic_t kmemleak_error = ATOMIC_INIT(0);
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
-/* used for yielding the CPU to other tasks during scanning */
-static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
-static unsigned long jiffies_scan_yield;
+/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
+static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
-static int kmemleak_stack_scan;
-/* mutex protecting the memory scanning */
+static int kmemleak_stack_scan = 1;
+/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
-/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
-static DEFINE_MUTEX(kmemleak_mutex);
/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -235,7 +231,7 @@ struct early_log {
};
/* early logging buffer and current position */
-static struct early_log early_log[200];
+static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -279,15 +275,6 @@ static int color_gray(const struct kmemleak_object *object)
}
/*
- * Objects are considered referenced if their color is gray and they have not
- * been deleted.
- */
-static int referenced_object(struct kmemleak_object *object)
-{
- return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
-}
-
-/*
* Objects are considered unreferenced only if their color is white, they have
* not been deleted and have a minimum age to avoid false positives caused by
* pointers temporarily stored in CPU registers.
@@ -295,42 +282,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
- time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
+ time_before_eq(object->jiffies + jiffies_min_age,
+ jiffies_last_scan);
}
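
The comparison now runs against jiffies_last_scan rather than the live jiffies counter, so an object allocated after a scan started cannot satisfy the age test merely because time passed before the report was read. Stripped of its type checking, the kernel's time_before_eq() is just a signed, wraparound-safe comparison, roughly:

/* Approximation of <linux/jiffies.h>: a <= b in jiffies terms,
 * correct across counter wraparound. */
#define time_before_eq(a, b)	((long)((a) - (b)) <= 0)

/* Example: with jiffies_min_age of 5000 ticks, an object stamped at
 * t = 100000 is reportable only by scans started at t >= 105000. */
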
/*
- * Printing of the (un)referenced objects information, either to the seq file
- * or to the kernel log. The print_referenced/print_unreferenced functions
- * must be called with the object->lock held.
+ * Printing of the unreferenced objects information to the seq file. The
+ * print_unreferenced function must be called with the object->lock held.
*/
-#define print_helper(seq, x...) do { \
- struct seq_file *s = (seq); \
- if (s) \
- seq_printf(s, x); \
- else \
- pr_info(x); \
-} while (0)
-
-static void print_referenced(struct kmemleak_object *object)
-{
- pr_info("referenced object 0x%08lx (size %zu)\n",
- object->pointer, object->size);
-}
-
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
- print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
- object->pointer, object->size);
- print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
- print_helper(seq, " backtrace:\n");
+ seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+ object->pointer, object->size);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
+ object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
- print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
+ seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -554,8 +527,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
+#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
+#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -571,8 +546,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
- if (object->flags & OBJECT_REPORTED)
- print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -696,7 +669,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_stop("Early log buffer exceeded\n");
+ pr_warning("Early log buffer exceeded\n");
+ kmemleak_disable();
return;
}
@@ -808,21 +782,6 @@ void kmemleak_no_scan(const void *ptr)
EXPORT_SYMBOL(kmemleak_no_scan);
/*
- * Yield the CPU so that other tasks get a chance to run. The yielding is
- * rate-limited to avoid excessive number of calls to the schedule() function
- * during memory scanning.
- */
-static void scan_yield(void)
-{
- might_sleep();
-
- if (time_is_before_eq_jiffies(next_scan_yield)) {
- schedule();
- next_scan_yield = jiffies + jiffies_scan_yield;
- }
-}
-
-/*
* Memory scanning is a long process and it needs to be interruptible. This
* function checks whether such an interrupt condition has occurred.
*/
@@ -862,15 +821,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;
- /*
- * When scanning a memory block with a corresponding
- * kmemleak_object, the CPU yielding is handled in the calling
- * code since it holds the object->lock to avoid the block
- * freeing.
- */
- if (!scanned)
- scan_yield();
-
object = find_and_get_object(pointer, 1);
if (!object)
continue;
@@ -952,6 +902,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
+ int new_leaks = 0;
+
+ jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1033,7 +986,7 @@ static void kmemleak_scan(void)
*/
object = list_entry(gray_list.next, typeof(*object), gray_list);
while (&object->gray_list != &gray_list) {
- scan_yield();
+ cond_resched();
/* may add new objects to the list */
if (!scan_should_stop())
@@ -1049,6 +1002,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));
+
+ /*
+ * If scanning was stopped do not report any new unreferenced objects.
+ */
+ if (scan_should_stop())
+ return;
+
+ /*
+ * Scanning result reporting.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(object, &object_list, object_list) {
+ spin_lock_irqsave(&object->lock, flags);
+ if (unreferenced_object(object) &&
+ !(object->flags & OBJECT_REPORTED)) {
+ object->flags |= OBJECT_REPORTED;
+ new_leaks++;
+ }
+ spin_unlock_irqrestore(&object->lock, flags);
+ }
+ rcu_read_unlock();
+
+ if (new_leaks)
+ pr_info("%d new suspected memory leaks (see "
+ "/sys/kernel/debug/kmemleak)\n", new_leaks);
+
}
/*
@@ -1070,36 +1049,12 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
- struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;
mutex_lock(&scan_mutex);
-
kmemleak_scan();
- reported_leaks = 0;
-
- rcu_read_lock();
- list_for_each_entry_rcu(object, &object_list, object_list) {
- unsigned long flags;
-
- if (reported_leaks >= REPORTS_NR)
- break;
- spin_lock_irqsave(&object->lock, flags);
- if (!(object->flags & OBJECT_REPORTED) &&
- unreferenced_object(object)) {
- print_unreferenced(NULL, object);
- object->flags |= OBJECT_REPORTED;
- reported_leaks++;
- } else if ((object->flags & OBJECT_REPORTED) &&
- referenced_object(object)) {
- print_referenced(object);
- object->flags &= ~OBJECT_REPORTED;
- }
- spin_unlock_irqrestore(&object->lock, flags);
- }
- rcu_read_unlock();
-
mutex_unlock(&scan_mutex);
+
/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1067,7 @@ static int kmemleak_scan_thread(void *arg)
/*
* Start the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1127,7 +1082,7 @@ void start_scan_thread(void)
/*
* Stop the automatic memory scanning thread. This function must be called
- * with the kmemleak_mutex held.
+ * with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1147,10 +1102,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;
- if (!n) {
- kmemleak_scan();
+ if (!n)
reported_leaks = 0;
- }
if (reported_leaks >= REPORTS_NR)
return NULL;
@@ -1211,11 +1164,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;
spin_lock_irqsave(&object->lock, flags);
- if (!unreferenced_object(object))
- goto out;
- print_unreferenced(seq, object);
- reported_leaks++;
-out:
+ if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+ print_unreferenced(seq, object);
+ reported_leaks++;
+ }
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1234,13 +1186,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;
- ret = mutex_lock_interruptible(&kmemleak_mutex);
+ ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
- ret = mutex_lock_interruptible(&scan_mutex);
- if (ret < 0)
- goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1249,8 +1198,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
scan_unlock:
mutex_unlock(&scan_mutex);
-kmemleak_unlock:
- mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1259,11 +1206,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
seq_release(inode, file);
- mutex_unlock(&scan_mutex);
- }
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
return ret;
}
@@ -1278,6 +1223,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
+ * scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1315,7 +1261,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
- } else
+ } else if (strncmp(buf, "scan", 4) == 0)
+ kmemleak_scan();
+ else
return -EINVAL;
/* ignore the rest of the buffer, only one command at a time */
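
With the new "scan" command the report can be refreshed on demand and then read back from the same file. A small userspace sketch of that sequence (error handling trimmed; assumes debugfs is mounted at /sys/kernel/debug as in the text above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Trigger an immediate memory scan. */
	fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
	if (fd >= 0) {
		write(fd, "scan", 4);
		close(fd);
	}

	/* Read back any suspected leaks. */
	fd = open("/sys/kernel/debug/kmemleak", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
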
@@ -1340,11 +1288,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
stop_scan_thread();
- mutex_unlock(&kmemleak_mutex);
- mutex_lock(&scan_mutex);
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1411,7 +1357,6 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD);
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
@@ -1486,9 +1431,9 @@ static int __init kmemleak_late_init(void)
&kmemleak_fops);
if (!dentry)
pr_warning("Failed to create the debugfs kmemleak file\n");
- mutex_lock(&kmemleak_mutex);
+ mutex_lock(&scan_mutex);
start_scan_thread();
- mutex_unlock(&kmemleak_mutex);
+ mutex_unlock(&scan_mutex);
pr_info("Kernel memory leak detector initialized\n");
diff --git a/mm/memory.c b/mm/memory.c
index f46ac18ba23..65216194eb8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
- if (len <= 0)
+ if (nr_pages <= 0)
return 0;
/*
* Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
- len--;
+ nr_pages--;
continue;
}
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &len, i, write);
+ &start, &nr_pages, i, write);
continue;
}
@@ -1357,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vmas[i] = vma;
i++;
start += PAGE_SIZE;
- len--;
- } while (len && start < vma->vm_end);
- } while (len);
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
return i;
}
@@ -1368,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
- * @len: number of pages from start to pin
+ * @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
* @force: whether to force write access even if user mapping is
* readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* Or NULL if the caller does not require them.
*
* Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* See also get_user_pages_fast, for performance critical applications.
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -1424,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
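
For reference, a typical caller of the renamed interface pins one page, uses it, and releases it with put_page(), holding mmap_sem across the call as the comment above requires. A kernel-context sketch only (user_addr is a hypothetical variable):

/* Pin a single page of the current task's address space. */
struct page *page;
int ret;

down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,
		     user_addr & PAGE_MASK, 1,	/* nr_pages */
		     1 /* write */, 0 /* force */,
		     &page, NULL);
up_read(&current->mm->mmap_sem);

if (ret == 1) {
	/* ... access the pinned page ... */
	put_page(page);	/* each returned page must be released */
}
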
diff --git a/mm/nommu.c b/mm/nommu.c
index 598bc871487..53cab10fece 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
}
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- for (i = 0; i < len; i++) {
+ for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935..7687879253b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping)
* filesystems (i.e. NFS) in which data may have been
* written to the server's write cache, but has not yet
* been flushed to permanent storage.
+ * Only move pages to writeback if this bdi is over its
+ * threshold, otherwise wait until the disk writes catch
+ * up.
*/
- if (bdi_nr_reclaimable) {
+ if (bdi_nr_reclaimable > bdi_thresh) {
writeback_inodes(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
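
The behavioural change is easy to misread: previously any reclaimable page on the bdi triggered writeback, whereas now writeback starts only once this bdi exceeds its own share of the dirty threshold. Restated as a hypothetical helper (not in the patch):

/* Decide whether this bdi should start writeback. */
static int bdi_over_threshold(unsigned long bdi_nr_reclaimable,
			      unsigned long bdi_thresh)
{
	/* The old code effectively tested bdi_nr_reclaimable > 0; the
	 * new test lets in-flight disk writes catch up while the bdi
	 * is still under its threshold. */
	return bdi_nr_reclaimable > bdi_thresh;
}
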
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aecc9cdfdfc..e0f2cdf9d8b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ again:
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
- * allocate greater than single-page units with
+ * allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
- WARN_ON_ONCE(order > 0);
+ WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
@@ -4032,6 +4032,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
int i, nid;
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
+ /* save the state before borrowing the nodemask */
+ nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
@@ -4059,7 +4061,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
/* If kernelcore was not specified, there is no ZONE_MOVABLE */
if (!required_kernelcore)
- return;
+ goto out;
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
find_usable_zone_for_movable();
@@ -4158,6 +4160,10 @@ restart:
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+ /* restore the node_state */
+ node_states[N_HIGH_MEMORY] = saved_node_state;
}
/* Any regular memory on that node ? */
@@ -4242,11 +4248,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
early_node_map[i].start_pfn,
early_node_map[i].end_pfn);
- /*
- * find_zone_movable_pfns_for_nodes/early_calculate_totalpages init
- * that node_mask, clear it at first
- */
- nodes_clear(node_states[N_HIGH_MEMORY]);
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
diff --git a/mm/percpu.c b/mm/percpu.c
index c0b2c1a76e8..b70f2acd885 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
* @chunk: chunk of interest
* @page_start: page index of the first page to unmap
* @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
*
* For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
* The vcache is always flushed before unmapping; if @flush_tlb is true,
* the tlb is flushed after.
*/
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
- bool flush)
+ bool flush_tlb)
{
unsigned int last = num_possible_cpus() - 1;
unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
* the whole region at once rather than doing it for each cpu.
* This could be overkill, but it is more scalable.
*/
- if (flush)
- flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
- pcpu_chunk_addr(chunk, last, page_end));
+ flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+ pcpu_chunk_addr(chunk, last, page_end));
for_each_possible_cpu(cpu)
unmap_kernel_range_noflush(
@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
(page_end - page_start) << PAGE_SHIFT);
/* ditto as flush_cache_vunmap() */
- if (flush)
+ if (flush_tlb)
flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
pcpu_chunk_addr(chunk, last, page_end));
}
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
ssize_t dyn_size, ssize_t unit_size)
{
+ size_t chunk_size;
unsigned int cpu;
/* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
} else
pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
- pcpue_ptr = __alloc_bootmem_nopanic(
- num_possible_cpus() * pcpue_unit_size,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- if (!pcpue_ptr)
+ chunk_size = pcpue_unit_size * num_possible_cpus();
+
+ pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS));
+ if (!pcpue_ptr) {
+ pr_warning("PERCPU: failed to allocate %zu bytes for "
+ "embedding\n", chunk_size);
return -ENOMEM;
+ }
/* return the leftover and copy */
for_each_possible_cpu(cpu) {
diff --git a/mm/shmem.c b/mm/shmem.c
index e89d7ec18ed..d713239ce2c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, int mode,
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
+ cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
@@ -2388,7 +2389,6 @@ static void shmem_destroy_inode(struct inode *inode)
/* only struct inode is valid if it's an inline symlink */
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}
- shmem_acl_destroy_inode(inode);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
@@ -2397,10 +2397,6 @@ static void init_once(void *foo)
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
- p->i_acl = NULL;
- p->i_default_acl = NULL;
-#endif
}
static int init_inodecache(void)
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 8e5aadd7dcd..606a8e757a4 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -22,11 +22,11 @@ shmem_get_acl(struct inode *inode, int type)
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
- acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
+ acl = posix_acl_dup(inode->i_acl);
break;
case ACL_TYPE_DEFAULT:
- acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
+ acl = posix_acl_dup(inode->i_default_acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -45,13 +45,13 @@ shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
- free = SHMEM_I(inode)->i_acl;
- SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
+ free = inode->i_acl;
+ inode->i_acl = posix_acl_dup(acl);
break;
case ACL_TYPE_DEFAULT:
- free = SHMEM_I(inode)->i_default_acl;
- SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
+ free = inode->i_default_acl;
+ inode->i_default_acl = posix_acl_dup(acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -155,23 +155,6 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
}
/**
- * shmem_acl_destroy_inode - destroy acls hanging off the in-memory inode
- *
- * This is done before destroying the actual inode.
- */
-
-void
-shmem_acl_destroy_inode(struct inode *inode)
-{
- if (SHMEM_I(inode)->i_acl)
- posix_acl_release(SHMEM_I(inode)->i_acl);
- SHMEM_I(inode)->i_acl = NULL;
- if (SHMEM_I(inode)->i_default_acl)
- posix_acl_release(SHMEM_I(inode)->i_default_acl);
- SHMEM_I(inode)->i_default_acl = NULL;
-}
-
-/**
* shmem_check_acl - check_acl() callback for generic_permission()
*/
static int
diff --git a/mm/slub.c b/mm/slub.c
index ce62b770e2f..819f056b39c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
+ gfp_t alloc_gfp;
flags |= s->allocflags;
- page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
- oo);
+ /*
+ * Let the initial higher-order allocation fail under memory pressure
+ * so we fall back to the minimum order allocation.
+ */
+ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
/*