Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitops.h           |   6
-rw-r--r--  include/linux/buffer_head.h      |  26
-rw-r--r--  include/linux/connector.h        |   7
-rw-r--r--  include/linux/console.h          |   3
-rw-r--r--  include/linux/console_struct.h   |   1
-rw-r--r--  include/linux/cpuset.h           |   2
-rw-r--r--  include/linux/dca.h              |  47
-rw-r--r--  include/linux/dma-mapping.h      |   7
-rw-r--r--  include/linux/fb.h               |   3
-rw-r--r--  include/linux/fs.h               |  78
-rw-r--r--  include/linux/gfp.h              |  62
-rw-r--r--  include/linux/hugetlb.h          |   1
-rw-r--r--  include/linux/init.h             |   2
-rw-r--r--  include/linux/interrupt.h        |   9
-rw-r--r--  include/linux/ioport.h           |   3
-rw-r--r--  include/linux/isdn.h             |   3
-rw-r--r--  include/linux/jbd.h              |   1
-rw-r--r--  include/linux/jiffies.h          |   2
-rw-r--r--  include/linux/kernel.h           |   1
-rw-r--r--  include/linux/kprobes.h          |   6
-rw-r--r--  include/linux/memory_hotplug.h   |  18
-rw-r--r--  include/linux/mempolicy.h        |   7
-rw-r--r--  include/linux/mm.h               |  97
-rw-r--r--  include/linux/mm_types.h         | 158
-rw-r--r--  include/linux/mmzone.h           |  68
-rw-r--r--  include/linux/nfsd/export.h      |  11
-rw-r--r--  include/linux/nodemask.h         |  94
-rw-r--r--  include/linux/page-isolation.h   |  37
-rw-r--r--  include/linux/pageblock-flags.h  |  75
-rw-r--r--  include/linux/pagemap.h          |  36
-rw-r--r--  include/linux/pci_ids.h          |   4
-rw-r--r--  include/linux/radix-tree.h       |  40
-rw-r--r--  include/linux/sched.h            |  77
-rw-r--r--  include/linux/selection.h        |   1
-rw-r--r--  include/linux/serial_core.h      |   3
-rw-r--r--  include/linux/slab.h             |   4
-rw-r--r--  include/linux/slub_def.h         |  71
-rw-r--r--  include/linux/sm501-regs.h       |  18
-rw-r--r--  include/linux/spi/spi.h          |  12
-rw-r--r--  include/linux/usb/gadget.h       |   4
40 files changed, 775 insertions(+), 330 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 638165f571d..b9fb8ee3308 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -8,6 +8,12 @@
*/
#include <asm/bitops.h>
+#define for_each_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
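
For context, a minimal sketch of how the new iterator is meant to be used; the bitmap and handler names below are hypothetical, not part of the patch.

#include <linux/bitops.h>
#include <linux/kernel.h>

/* Hypothetical: report every set bit in a pending-work bitmap. */
static void report_pending(const unsigned long *pending, unsigned int nbits)
{
	unsigned int bit;

	for_each_bit(bit, pending, nbits)
		printk(KERN_DEBUG "bit %u pending\n", bit);
}
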
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 35cadad84b1..da0d83fbadc 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -203,10 +203,20 @@ void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
+int block_write_begin(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page **, void **, get_block_t*);
+int block_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int generic_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
-int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
- loff_t *);
-int generic_cont_expand(struct inode *inode, loff_t size);
+int cont_write_begin(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page **, void **,
+ get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
@@ -216,9 +226,13 @@ sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
-int nobh_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
-int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
-int nobh_truncate_page(struct address_space *, loff_t);
+int nobh_write_begin(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page **, void **, get_block_t*);
+int nobh_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
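
As a sketch of how a filesystem adopts these replacements for prepare_write/commit_write, the fragment below wires a hypothetical myfs to block_write_begin() and generic_write_end(); myfs_get_block is an assumed get_block_t callback, not something defined in this patch.

/* Hypothetical adoption of the new hooks by a blockdev-backed fs. */
static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.write_begin	= myfs_write_begin,
	.write_end	= generic_write_end,
};
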
diff --git a/include/linux/connector.h b/include/linux/connector.h
index b62f823e90c..13fc4541bf2 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -36,14 +36,15 @@
#define CN_VAL_CIFS 0x1
#define CN_W1_IDX 0x3 /* w1 communication */
#define CN_W1_VAL 0x1
+#define CN_IDX_V86D 0x4
+#define CN_VAL_V86D_UVESAFB 0x1
-
-#define CN_NETLINK_USERS 4
+#define CN_NETLINK_USERS 5
/*
* Maximum connector's message size.
*/
-#define CONNECTOR_MAX_MSG_SIZE 1024
+#define CONNECTOR_MAX_MSG_SIZE 16384
/*
* idx and val are unique identifiers which
diff --git a/include/linux/console.h b/include/linux/console.h
index 56a7bcda49c..0a4542ddb73 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -45,7 +45,8 @@ struct consw {
int (*con_font_get)(struct vc_data *, struct console_font *);
int (*con_font_default)(struct vc_data *, struct console_font *, char *);
int (*con_font_copy)(struct vc_data *, int);
- int (*con_resize)(struct vc_data *, unsigned int, unsigned int);
+ int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
+ unsigned int);
int (*con_set_palette)(struct vc_data *, unsigned char *);
int (*con_scrolldelta)(struct vc_data *, int);
int (*con_set_origin)(struct vc_data *);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index dc77fed7b28..d71f7c0f931 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -100,6 +100,7 @@ struct vc_data {
unsigned char vc_G1_charset;
unsigned char vc_saved_G0;
unsigned char vc_saved_G1;
+ unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 826b15e914e..9e633ea103c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -93,7 +93,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
return node_possible_map;
}
-#define cpuset_current_mems_allowed (node_online_map)
+#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}
#define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/dca.h b/include/linux/dca.h
new file mode 100644
index 00000000000..83eaecc6f8a
--- /dev/null
+++ b/include/linux/dca.h
@@ -0,0 +1,47 @@
+#ifndef DCA_H
+#define DCA_H
+/* DCA Provider API */
+
+/* DCA Notifier Interface */
+void dca_register_notify(struct notifier_block *nb);
+void dca_unregister_notify(struct notifier_block *nb);
+
+#define DCA_PROVIDER_ADD 0x0001
+#define DCA_PROVIDER_REMOVE 0x0002
+
+struct dca_provider {
+ struct dca_ops *ops;
+ struct class_device *cd;
+ int id;
+};
+
+struct dca_ops {
+ int (*add_requester) (struct dca_provider *, struct device *);
+ int (*remove_requester) (struct dca_provider *, struct device *);
+ u8 (*get_tag) (struct dca_provider *, int cpu);
+};
+
+struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
+void free_dca_provider(struct dca_provider *dca);
+int register_dca_provider(struct dca_provider *dca, struct device *dev);
+void unregister_dca_provider(struct dca_provider *dca);
+
+static inline void *dca_priv(struct dca_provider *dca)
+{
+ return (void *)dca + sizeof(struct dca_provider);
+}
+
+/* Requester API */
+int dca_add_requester(struct device *dev);
+int dca_remove_requester(struct device *dev);
+u8 dca_get_tag(int cpu);
+
+/* internal stuff */
+int __init dca_sysfs_init(void);
+void __exit dca_sysfs_exit(void);
+int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev);
+void dca_sysfs_remove_provider(struct dca_provider *dca);
+int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot);
+void dca_sysfs_remove_req(struct dca_provider *dca, int slot);
+
+#endif /* DCA_H */
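
A rough sketch of provider-side usage of the API above, under the assumption of a driver-private struct and callbacks (all mydrv_* names are invented for illustration):

static int mydrv_add_requester(struct dca_provider *, struct device *);
static int mydrv_remove_requester(struct dca_provider *, struct device *);
static u8 mydrv_get_tag(struct dca_provider *, int cpu);

struct mydrv_priv { int dummy; };	/* hypothetical per-provider state */

static struct dca_ops mydrv_dca_ops = {
	.add_requester		= mydrv_add_requester,
	.remove_requester	= mydrv_remove_requester,
	.get_tag		= mydrv_get_tag,
};

static int mydrv_setup_dca(struct device *dev)
{
	struct dca_provider *dca;
	int err;

	dca = alloc_dca_provider(&mydrv_dca_ops, sizeof(struct mydrv_priv));
	if (!dca)
		return -ENOMEM;
	err = register_dca_provider(dca, dev);
	if (err) {
		free_dca_provider(dca);
		return err;
	}
	/* driver state lives past the struct, reachable via dca_priv(dca) */
	return 0;
}
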
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2dc21cbeb30..0ebfafbd338 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -24,6 +24,8 @@ enum dma_data_direction {
#define DMA_28BIT_MASK 0x000000000fffffffULL
#define DMA_24BIT_MASK 0x0000000000ffffffULL
+#define DMA_MASK_NONE 0x0ULL
+
static inline int valid_dma_direction(int dma_direction)
{
return ((dma_direction == DMA_BIDIRECTIONAL) ||
@@ -31,6 +33,11 @@ static inline int valid_dma_direction(int dma_direction)
(dma_direction == DMA_FROM_DEVICE));
}
+static inline int is_device_dma_capable(struct device *dev)
+{
+ return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
+}
+
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
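
A short hedged example of where the new helper fits; the function below is illustrative, not from the patch.

/* Hypothetical: refuse DMA setup for devices with no usable mask. */
static int mybus_dma_setup(struct device *dev)
{
	if (!is_device_dma_capable(dev))
		return -EIO;	/* dma_mask unset or DMA_MASK_NONE: use PIO */
	return 0;
}
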
diff --git a/include/linux/fb.h b/include/linux/fb.h
index cec54106aa8..58c57a33e5d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -180,6 +180,7 @@ struct fb_bitfield {
};
#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */
+#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */
#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/
#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */
@@ -206,6 +207,7 @@ struct fb_bitfield {
#define FB_VMODE_NONINTERLACED 0 /* non interlaced */
#define FB_VMODE_INTERLACED 1 /* interlaced */
#define FB_VMODE_DOUBLE 2 /* double scan */
+#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */
#define FB_VMODE_MASK 255
#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */
@@ -1054,6 +1056,7 @@ struct fb_videomode {
u32 flag;
};
+extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
struct fb_modelist {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4a6a21077ba..f70d52c4661 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -381,7 +381,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage(), prepare_write(), and commit_write().
+ * by readpage().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -394,6 +394,9 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
+#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
+#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
+
/*
* oh the beauties of C type declarations.
*/
@@ -401,6 +404,39 @@ struct page;
struct address_space;
struct writeback_control;
+struct iov_iter {
+ const struct iovec *iov;
+ unsigned long nr_segs;
+ size_t iov_offset;
+ size_t count;
+};
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(struct iov_iter *i);
+
+static inline void iov_iter_init(struct iov_iter *i,
+ const struct iovec *iov, unsigned long nr_segs,
+ size_t count, size_t written)
+{
+ i->iov = iov;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count + written;
+
+ iov_iter_advance(i, written);
+}
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+ return i->count;
+}
+
+
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
@@ -421,6 +457,14 @@ struct address_space_operations {
*/
int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+
+ int (*write_begin)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+ int (*write_end)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned long);
@@ -435,6 +479,18 @@ struct address_space_operations {
int (*launder_page) (struct page *);
};
+/*
+ * pagecache_write_begin/pagecache_write_end must be used by general code
+ * to write into the pagecache.
+ */
+int pagecache_write_begin(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+
+int pagecache_write_end(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
struct backing_dev_info;
struct address_space {
struct inode *host; /* owner: inode, block_device */
@@ -697,16 +753,14 @@ struct fown_struct {
* Track a single file's readahead state
*/
struct file_ra_state {
- pgoff_t start; /* where readahead started */
- unsigned long size; /* # of readahead pages */
- unsigned long async_size; /* do asynchronous readahead when
+ pgoff_t start; /* where readahead started */
+ unsigned int size; /* # of readahead pages */
+ unsigned int async_size; /* do asynchronous readahead when
there are only # of pages ahead */
- unsigned long ra_pages; /* Maximum readahead window */
- unsigned long mmap_hit; /* Cache hit stat for mmap accesses */
- unsigned long mmap_miss; /* Cache miss stat for mmap accesses */
- unsigned long prev_index; /* Cache last read() position */
- unsigned int prev_offset; /* Offset where last read() ended in a page */
+ unsigned int ra_pages; /* Maximum readahead window */
+ int mmap_miss; /* Cache miss stat for mmap accesses */
+ loff_t prev_pos; /* Cache last read() position */
};
/*
@@ -1835,6 +1889,12 @@ extern int simple_prepare_write(struct file *file, struct page *page,
unsigned offset, unsigned to);
extern int simple_commit_write(struct file *file, struct page *page,
unsigned offset, unsigned to);
+extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+extern int simple_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
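
The iov_iter and pagecache_write_begin/end pieces combine into a copy loop along these lines. This is a condensed sketch of what the generic write path does; the real loop also pre-faults the iovec, handles short atomic copies, and checks the write_end return per iteration.

static ssize_t sketch_perform_write(struct file *file, struct iov_iter *i,
				loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	ssize_t written = 0;

	while (iov_iter_count(i)) {
		unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned long bytes = min_t(unsigned long,
				PAGE_CACHE_SIZE - offset, iov_iter_count(i));
		struct page *page;
		void *fsdata;
		size_t copied;
		int status;

		status = pagecache_write_begin(file, mapping, pos, bytes, 0,
					       &page, &fsdata);
		if (status)
			return written ? written : status;
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		status = pagecache_write_end(file, mapping, pos, bytes, copied,
					     page, fsdata);
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
	}
	return written;
}
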
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bc68dd9a6d4..7e93a9ae706 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -48,18 +48,12 @@ struct vm_area_struct;
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
-#define __GFP_MOVABLE ((__force gfp_t)0x80000u) /* Page is movable */
+#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
+#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
-#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-/* if you forget to add the bitmask here kernel will crash, period */
-#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
- __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
- __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
- __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
- __GFP_MOVABLE)
-
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
@@ -67,6 +61,8 @@ struct vm_area_struct;
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+ __GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
__GFP_HIGHMEM)
@@ -86,6 +82,19 @@ struct vm_area_struct;
#define GFP_THISNODE ((__force gfp_t)0)
#endif
+/* This mask makes up all the page movable related flags */
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+
+/* Control page allocator reclaim behavior */
+#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+ __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+ __GFP_NORETRY|__GFP_NOMEMALLOC)
+
+/* Control allocation constraints */
+#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
+
+/* Do not use these with a slab allocator */
+#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
@@ -95,25 +104,50 @@ struct vm_area_struct;
/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
+/* Convert GFP flags to their corresponding migrate type */
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+{
+ WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+ if (unlikely(page_group_by_mobility_disabled))
+ return MIGRATE_UNMOVABLE;
+
+ /* Group based on mobility */
+ return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+ ((gfp_flags & __GFP_RECLAIMABLE) != 0);
+}
static inline enum zone_type gfp_zone(gfp_t flags)
{
+ int base = 0;
+
+#ifdef CONFIG_NUMA
+ if (flags & __GFP_THISNODE)
+ base = MAX_NR_ZONES;
+#endif
+
#ifdef CONFIG_ZONE_DMA
if (flags & __GFP_DMA)
- return ZONE_DMA;
+ return base + ZONE_DMA;
#endif
#ifdef CONFIG_ZONE_DMA32
if (flags & __GFP_DMA32)
- return ZONE_DMA32;
+ return base + ZONE_DMA32;
#endif
if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
(__GFP_HIGHMEM | __GFP_MOVABLE))
- return ZONE_MOVABLE;
+ return base + ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
if (flags & __GFP_HIGHMEM)
- return ZONE_HIGHMEM;
+ return base + ZONE_HIGHMEM;
#endif
- return ZONE_NORMAL;
+ return base + ZONE_NORMAL;
+}
+
+static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
+{
+ BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+ return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
}
/*
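
Worked examples of the bit arithmetic in allocflags_to_migratetype(), using the MIGRATE_* values added to mmzone.h by this series (setting both new bits is the WARN_ON'd case):

/*
 * GFP_KERNEL                   -> (0 << 1) | 0 = 0 = MIGRATE_UNMOVABLE
 * GFP_TEMPORARY                -> (0 << 1) | 1 = 1 = MIGRATE_RECLAIMABLE
 * GFP_HIGHUSER | __GFP_MOVABLE -> (1 << 1) | 0 = 2 = MIGRATE_MOVABLE
 */
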
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3a19b032c0e..ea0f50bfbe0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,6 +33,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long max_huge_pages;
extern unsigned long hugepages_treat_as_movable;
+extern int hugetlb_dynamic_pool;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/init.h b/include/linux/init.h
index f8d9d0b5cff..9b7a2ba8237 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -67,8 +67,10 @@
/* For assembly routines */
#define __INIT .section ".init.text","ax"
+#define __INIT_REFOK .section ".text.init.refok","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
+#define __INITDATA_REFOK .section ".data.init.refok","aw"
#ifndef __ASSEMBLY__
/*
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5523f19d88d..8e5f289052a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -205,6 +205,15 @@ static inline int disable_irq_wake(unsigned int irq)
enable_irq(irq)
# endif
+static inline int enable_irq_wake(unsigned int irq)
+{
+ return 0;
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+ return 0;
+}
#endif /* CONFIG_GENERIC_HARDIRQS */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 71ea9231924..6187a8567bc 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -110,9 +110,6 @@ extern int allocate_resource(struct resource *root, struct resource *new,
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
-/* get registered SYSTEM_RAM resources in specified area */
-extern int find_next_system_ram(struct resource *res);
-
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index d5dda4b643a..d0ecc8eebfb 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -167,6 +167,7 @@ typedef struct {
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
+#include <linux/mutex.h>
#define ISDN_TTY_MAJOR 43
#define ISDN_TTYAUX_MAJOR 44
@@ -616,7 +617,7 @@ typedef struct isdn_devt {
int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */
atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */
isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */
- struct semaphore sem; /* serialize list access*/
+ struct mutex mtx; /* serialize list access*/
unsigned long global_features;
} isdn_dev;
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 700a93b7918..72f52237292 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -372,6 +372,7 @@ struct jbd_revoke_table_s;
* @h_sync: flag for sync-on-close
* @h_jdata: flag to force data journaling
* @h_aborted: flag indicating fatal error on handle
+ * @h_lockdep_map: lockdep info for debugging lock problems
**/
/* Docbook can't yet cope with the bit fields, but will leave the documentation
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index e757a74b9d1..8b080024bbc 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -148,6 +148,8 @@ static inline u64 get_jiffies_64(void)
*/
#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
+extern unsigned long preset_lpj;
+
/*
* We want to do realistic conversions of time so we need to use the same
* values the update wall clock code uses as the jiffies size. This value
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d9725a28a26..5fdbc814c2e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,6 +35,7 @@ extern const char linux_proc_banner[];
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
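
Two quick illustrations of the new alignment test; since the macro is a plain modulo check, any nonzero alignment works:

/*
 * IS_ALIGNED(addr, PAGE_SIZE) -> true when addr sits on a page boundary
 * IS_ALIGNED(len, 8)          -> true when len is a multiple of 8
 */
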
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 51464d12a4e..81891581e89 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -166,6 +166,12 @@ struct kretprobe_instance {
struct task_struct *task;
};
+struct kretprobe_blackpoint {
+ const char *name;
+ void *addr;
+};
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
+
static inline void kretprobe_assert(struct kretprobe_instance *ri,
unsigned long orig_ret_address, unsigned long trampoline_address)
{
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 7b54666cea8..8fee7a45736 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -3,7 +3,6 @@
#include <linux/mmzone.h>
#include <linux/spinlock.h>
-#include <linux/mmzone.h>
#include <linux/notifier.h>
struct page;
@@ -59,11 +58,21 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
+extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
+/*
+ * Walk through all memory which is registered as a resource.
+ * The callback is invoked as func(start_pfn, nr_pages, arg).
+ */
+extern int walk_memory_resource(unsigned long start_pfn,
+ unsigned long nr_pages, void *arg,
+ int (*func)(unsigned long, unsigned long, void *));
+
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
@@ -161,13 +170,6 @@ static inline int mhp_notimplemented(const char *func)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
-static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages)
-{
- printk(KERN_WARNING "%s() called, not yet supported\n", __FUNCTION__);
- dump_stack();
- return -ENOSYS;
-}
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
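
A sketch of the walker's callback convention; the counting callback here is hypothetical, and the walk is assumed to stop early if the callback returns nonzero.

/* Hypothetical: total up all registered System RAM in a pfn range. */
static int count_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	*(unsigned long *)arg += nr_pages;
	return 0;	/* keep walking; nonzero is assumed to abort */
}

static unsigned long count_ram(unsigned long base_pfn, unsigned long span)
{
	unsigned long total = 0;

	walk_memory_resource(base_pfn, span, &total, count_ram_cb);
	return total;
}
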
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index a020eb2d4e2..38c04d61ee0 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -19,6 +19,7 @@
/* Flags for get_mem_policy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */
+#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
/* Flags for mbind */
#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
@@ -143,7 +144,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
extern void numa_default_policy(void);
extern void numa_policy_init(void);
-extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
@@ -235,11 +235,6 @@ static inline void numa_default_policy(void)
{
}
-static inline void mpol_rebind_policy(struct mempolicy *pol,
- const nodemask_t *new)
-{
-}
-
static inline void mpol_rebind_task(struct task_struct *tsk,
const nodemask_t *new)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1692dd6cb91..7e87e1b1662 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,69 +50,6 @@ extern int sysctl_legacy_va_layout;
* mmap() functions).
*/
-/*
- * This struct defines a memory VMM memory area. There is one of these
- * per VM-area/task. A VM area is any part of the process virtual memory
- * space that has a special rule for the page-fault handlers (ie a shared
- * library, the executable area etc).
- */
-struct vm_area_struct {
- struct mm_struct * vm_mm; /* The address space we belong to. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next;
-
- pgprot_t vm_page_prot; /* Access permissions of this VMA. */
- unsigned long vm_flags; /* Flags, listed below. */
-
- struct rb_node vm_rb;
-
- /*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap prio tree, or
- * linkage to the list of like vmas hanging off its node, or
- * linkage of vma in the address_space->i_mmap_nonlinear list.
- */
- union {
- struct {
- struct list_head list;
- void *parent; /* aligns with prio_tree_node parent */
- struct vm_area_struct *head;
- } vm_set;
-
- struct raw_prio_tree_node prio_tree_node;
- } shared;
-
- /*
- * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
- * list, after a COW of one of the file pages. A MAP_SHARED vma
- * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
- * or brk vma (with NULL file) can only be in an anon_vma list.
- */
- struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
- struct anon_vma *anon_vma; /* Serialized by page_table_lock */
-
- /* Function pointers to deal with this struct. */
- struct vm_operations_struct * vm_ops;
-
- /* Information about our backing store: */
- unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
- units, *not* PAGE_CACHE_SIZE */
- struct file * vm_file; /* File we map to (can be NULL). */
- void * vm_private_data; /* was vm_pte (shared mem) */
- unsigned long vm_truncate_count;/* truncate_count or restart_addr */
-
-#ifndef CONFIG_MMU
- atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */
-#endif
-#ifdef CONFIG_NUMA
- struct mempolicy *vm_policy; /* NUMA policy for the VMA */
-#endif
-};
-
extern struct kmem_cache *vm_area_cachep;
/*
@@ -631,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
VM_BUG_ON(PageSlab(page));
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
-#ifdef CONFIG_SLUB
- else if (unlikely(PageSlab(page)))
- mapping = NULL;
-#endif
else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
@@ -715,9 +648,6 @@ static inline int page_mapped(struct page *page)
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
-struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
- unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
@@ -725,18 +655,6 @@ static inline int shmem_lock(struct file *file, int lock,
{
return 0;
}
-
-static inline int shmem_set_policy(struct vm_area_struct *vma,
- struct mempolicy *new)
-{
- return 0;
-}
-
-static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
- unsigned long addr)
-{
- return NULL;
-}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
@@ -779,8 +697,6 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
-int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long size, pgprot_t prot);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
@@ -1106,8 +1022,6 @@ int write_one_page(struct page *page, int wait);
/* readahead.c */
#define VM_MAX_READAHEAD 128 /* kbytes */
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
-#define VM_MAX_CACHE_HIT 256 /* max pages in a row in cache before
- * turning readahead off */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
@@ -1218,5 +1132,16 @@ extern int randomize_va_space;
const char * arch_vma_name(struct vm_area_struct *vma);
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+ unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d5bb1796e12..87766791845 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1,13 +1,26 @@
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H
+#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/prio_tree.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/completion.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
struct address_space;
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+typedef atomic_long_t mm_counter_t;
+#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+typedef unsigned long mm_counter_t;
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -24,10 +37,7 @@ struct page {
* to show when page is mapped
* & limit reverse map searches.
*/
- struct { /* SLUB uses */
- short unsigned int inuse;
- short unsigned int offset;
- };
+ unsigned int inuse; /* SLUB: Nr of objects */
};
union {
struct {
@@ -49,13 +59,8 @@ struct page {
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
spinlock_t ptl;
#endif
- struct { /* SLUB uses */
- void **lockless_freelist;
- struct kmem_cache *slab; /* Pointer to slab */
- };
- struct {
- struct page *first_page; /* Compound pages */
- };
+ struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ struct page *first_page; /* Compound tail pages */
};
union {
pgoff_t index; /* Our offset within mapping. */
@@ -80,4 +85,135 @@ struct page {
#endif /* WANT_PAGE_VIRTUAL */
};
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* The address space we belong to. */
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* The first byte after our end address
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next;
+
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, listed below. */
+
+ struct rb_node vm_rb;
+
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap prio tree, or
+ * linkage to the list of like vmas hanging off its node, or
+ * linkage of vma in the address_space->i_mmap_nonlinear list.
+ */
+ union {
+ struct {
+ struct list_head list;
+ void *parent; /* aligns with prio_tree_node parent */
+ struct vm_area_struct *head;
+ } vm_set;
+
+ struct raw_prio_tree_node prio_tree_node;
+ } shared;
+
+ /*
+ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
+ * list, after a COW of one of the file pages. A MAP_SHARED vma
+ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
+ * or brk vma (with NULL file) can only be in an anon_vma list.
+ */
+ struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
+
+ /* Function pointers to deal with this struct. */
+ struct vm_operations_struct * vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+ unsigned long vm_truncate_count;/* truncate_count or restart_addr */
+
+#ifndef CONFIG_MMU
+ atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+#endif
+};
+
+struct mm_struct {
+ struct vm_area_struct * mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ struct vm_area_struct * mmap_cache; /* last find_vma result */
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
+ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long task_size; /* size of task vm space */
+ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
+ unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
+ pgd_t * pgd;
+ atomic_t mm_users; /* How many users with user space? */
+ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+ int map_count; /* number of VMAs */
+ struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock; /* Protects page tables and some counters */
+
+ struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
+ * together off init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
+
+ /* Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ mm_counter_t _file_rss;
+ mm_counter_t _anon_rss;
+
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
+
+ unsigned long total_vm, locked_vm, shared_vm, exec_vm;
+ unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+
+ cpumask_t cpu_vm_mask;
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
+
+ /* Swap token stuff */
+ /*
+ * Last value of global fault stamp as seen by this process.
+ * In other words, this value gives an indication of how long
+ * it has been since this task got the token.
+ * Look at mm/thrash.c
+ */
+ unsigned int faultstamp;
+ unsigned int token_priority;
+ unsigned int last_interval;
+
+ unsigned long flags; /* Must use atomic bitops to access the bits */
+
+ /* coredumping support */
+ int core_waiters;
+ struct completion *core_startup_done, core_done;
+
+ /* aio bits */
+ rwlock_t ioctx_list_lock;
+ struct kioctx *ioctx_list;
+};
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4e5627379b0..f4bfe824834 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
+#include <linux/pageblock-flags.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -32,8 +33,29 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
+#define MIGRATE_UNMOVABLE 0
+#define MIGRATE_RECLAIMABLE 1
+#define MIGRATE_MOVABLE 2
+#define MIGRATE_RESERVE 3
+#define MIGRATE_ISOLATE 4 /* can't allocate from here */
+#define MIGRATE_TYPES 5
+
+#define for_each_migratetype_order(order, type) \
+ for (order = 0; order < MAX_ORDER; order++) \
+ for (type = 0; type < MIGRATE_TYPES; type++)
+
+extern int page_group_by_mobility_disabled;
+
+static inline int get_pageblock_migratetype(struct page *page)
+{
+ if (unlikely(page_group_by_mobility_disabled))
+ return MIGRATE_UNMOVABLE;
+
+ return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
+}
+
struct free_area {
- struct list_head free_list;
+ struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
@@ -222,6 +244,14 @@ struct zone {
#endif
struct free_area free_area[MAX_ORDER];
+#ifndef CONFIG_SPARSEMEM
+ /*
+ * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+ * In SPARSEMEM, this map is stored in struct mem_section
+ */
+ unsigned long *pageblock_flags;
+#endif /* CONFIG_SPARSEMEM */
+
ZONE_PADDING(_pad1_)
@@ -324,6 +354,17 @@ struct zone {
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
#ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
+ * allocations to a single node for GFP_THISNODE.
+ *
+ * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback
+ * [MAX_NR_ZONES ... MAX_ZONELISTS -1] : No fallback (GFP_THISNODE)
+ */
+#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+
+
/*
* We cache key information from each zonelist for smaller cache
* footprint when scanning for free pages in get_page_from_freelist().
@@ -389,6 +430,7 @@ struct zonelist_cache {
unsigned long last_full_zap; /* when last zap'd (jiffies) */
};
#else
+#define MAX_ZONELISTS MAX_NR_ZONES
struct zonelist_cache;
#endif
@@ -455,7 +497,7 @@ extern struct page *mem_map;
struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[MAX_NR_ZONES];
- struct zonelist node_zonelists[MAX_NR_ZONES];
+ struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
struct page *node_mem_map;
@@ -708,6 +750,9 @@ extern struct zone *next_zone(struct zone *zone);
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
+#define SECTION_BLOCKFLAGS_BITS \
+ ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
+
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
@@ -727,6 +772,9 @@ struct mem_section {
* before using it wrong.
*/
unsigned long section_mem_map;
+
+ /* See declaration of similar field in struct zone */
+ unsigned long *pageblock_flags;
};
#ifdef CONFIG_SPARSEMEM_EXTREME
@@ -771,12 +819,17 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int valid_section(struct mem_section *section)
+static inline int present_section(struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
-static inline int section_has_mem_map(struct mem_section *section)
+static inline int present_section_nr(unsigned long nr)
+{
+ return present_section(__nr_to_section(nr));
+}
+
+static inline int valid_section(struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
@@ -798,6 +851,13 @@ static inline int pfn_valid(unsigned long pfn)
return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
+static inline int pfn_present(unsigned long pfn)
+{
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+}
+
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
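
To make the split free lists concrete, here is a hedged sketch of walking them; callers of anything like this would need zone->lock, and the function name and printk are illustrative only.

static void dump_free_lists(struct zone *zone)
{
	int order, t;

	for_each_migratetype_order(order, t) {
		unsigned long n = 0;
		struct list_head *pos;

		list_for_each(pos, &zone->free_area[order].free_list[t])
			n++;
		printk(KERN_DEBUG "order %d migratetype %d: %lu blocks\n",
		       order, t, n);
	}
}
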
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 5cd19246909..bcb7abafbca 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -127,17 +127,9 @@ void nfsd_export_shutdown(void);
void nfsd_export_flush(void);
void exp_readlock(void);
void exp_readunlock(void);
-struct svc_export * exp_get_by_name(struct auth_domain *clp,
- struct vfsmount *mnt,
- struct dentry *dentry,
- struct cache_req *reqp);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct vfsmount *,
struct dentry *);
-struct svc_export * exp_parent(struct auth_domain *clp,
- struct vfsmount *mnt,
- struct dentry *dentry,
- struct cache_req *reqp);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct vfsmount *mnt,
struct dentry *dentry);
@@ -157,9 +149,6 @@ static inline void exp_get(struct svc_export *exp)
{
cache_get(&exp->h);
}
-extern struct svc_export *
-exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
- struct cache_req *reqp);
struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
#endif /* __KERNEL__ */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 52c54a5720f..905e18f4b41 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -338,31 +338,88 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#endif /* MAX_NUMNODES */
/*
+ * Bitmasks that are kept for all the nodes.
+ */
+enum node_states {
+ N_POSSIBLE, /* The node could become online at some point */
+ N_ONLINE, /* The node is online */
+ N_NORMAL_MEMORY, /* The node has regular memory */
+#ifdef CONFIG_HIGHMEM
+ N_HIGH_MEMORY, /* The node has regular or high memory */
+#else
+ N_HIGH_MEMORY = N_NORMAL_MEMORY,
+#endif
+ N_CPU, /* The node has one or more cpus */
+ NR_NODE_STATES
+};
+
+/*
* The following particular system nodemasks and operations
* on them manage all possible and online nodes.
*/
-extern nodemask_t node_online_map;
-extern nodemask_t node_possible_map;
+extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-#define num_online_nodes() nodes_weight(node_online_map)
-#define num_possible_nodes() nodes_weight(node_possible_map)
-#define node_online(node) node_isset((node), node_online_map)
-#define node_possible(node) node_isset((node), node_possible_map)
-#define first_online_node first_node(node_online_map)
-#define next_online_node(nid) next_node((nid), node_online_map)
+static inline int node_state(int node, enum node_states state)
+{
+ return node_isset(node, node_states[state]);
+}
+
+static inline void node_set_state(int node, enum node_states state)
+{
+ __node_set(node, &node_states[state]);
+}
+
+static inline void node_clear_state(int node, enum node_states state)
+{
+ __node_clear(node, &node_states[state]);
+}
+
+static inline int num_node_state(enum node_states state)
+{
+ return nodes_weight(node_states[state]);
+}
+
+#define for_each_node_state(__node, __state) \
+ for_each_node_mask((__node), node_states[__state])
+
+#define first_online_node first_node(node_states[N_ONLINE])
+#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
+
extern int nr_node_ids;
#else
-#define num_online_nodes() 1
-#define num_possible_nodes() 1
-#define node_online(node) ((node) == 0)
-#define node_possible(node) ((node) == 0)
+
+static inline int node_state(int node, enum node_states state)
+{
+ return node == 0;
+}
+
+static inline void node_set_state(int node, enum node_states state)
+{
+}
+
+static inline void node_clear_state(int node, enum node_states state)
+{
+}
+
+static inline int num_node_state(enum node_states state)
+{
+ return 1;
+}
+
+#define for_each_node_state(node, __state) \
+ for ( (node) = 0; (node) == 0; (node) = 1)
+
#define first_online_node 0
#define next_online_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1
+
#endif
+#define node_online_map node_states[N_ONLINE]
+#define node_possible_map node_states[N_POSSIBLE]
+
#define any_online_node(mask) \
({ \
int node; \
@@ -372,10 +429,15 @@ extern int nr_node_ids;
node; \
})
-#define node_set_online(node) set_bit((node), node_online_map.bits)
-#define node_set_offline(node) clear_bit((node), node_online_map.bits)
+#define num_online_nodes() num_node_state(N_ONLINE)
+#define num_possible_nodes() num_node_state(N_POSSIBLE)
+#define node_online(node) node_state((node), N_ONLINE)
+#define node_possible(node) node_state((node), N_POSSIBLE)
+
+#define node_set_online(node) node_set_state((node), N_ONLINE)
+#define node_set_offline(node) node_clear_state((node), N_ONLINE)
-#define for_each_node(node) for_each_node_mask((node), node_possible_map)
-#define for_each_online_node(node) for_each_node_mask((node), node_online_map)
+#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
+#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
#endif /* __LINUX_NODEMASK_H */
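
Usage-wise, the node_states API reads like this minimal (hypothetical) helper, which prefers a node that has both high memory and CPUs:

static int pick_node(void)
{
	int nid;

	for_each_node_state(nid, N_HIGH_MEMORY)
		if (node_state(nid, N_CPU))
			return nid;
	return first_online_node;
}
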
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
new file mode 100644
index 00000000000..051c1b1ede4
--- /dev/null
+++ b/include/linux/page-isolation.h
@@ -0,0 +1,37 @@
+#ifndef __LINUX_PAGEISOLATION_H
+#define __LINUX_PAGEISOLATION_H
+
+/*
+ * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
+ * If the specified range includes migrate types other than MOVABLE,
+ * this will fail with -EBUSY.
+ *
+ * To fully isolate all pages in the range, the caller must then free
+ * every page in it; test_pages_isolated() can be used to verify that
+ * the range is isolated.
+ */
+extern int
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+
+/*
+ * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
+ * The target range is [start_pfn, end_pfn).
+ */
+extern int
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+
+/*
+ * Test whether all pages in [start_pfn, end_pfn) are isolated.
+ */
+extern int
+test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+
+/*
+ * Internal functions that change a pageblock's migrate type; callers
+ * should normally use start_isolate_page_range()/undo_isolate_page_range().
+ */
+extern int set_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page);
+
+
+#endif
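
The intended call sequence, sketched under the assumption that both start_isolate_page_range() and test_pages_isolated() return 0 on success (mirroring how memory unplug is expected to drive this):

static int sketch_offline_range(unsigned long start_pfn, unsigned long end_pfn)
{
	if (start_isolate_page_range(start_pfn, end_pfn))
		return -EBUSY;		/* non-movable pages in the range */

	/* ... migrate or free every page in [start_pfn, end_pfn) ... */

	if (test_pages_isolated(start_pfn, end_pfn)) {
		undo_isolate_page_range(start_pfn, end_pfn);
		return -EBUSY;
	}
	__offline_isolated_pages(start_pfn, end_pfn);
	return 0;
}
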
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
new file mode 100644
index 00000000000..e875905f7b1
--- /dev/null
+++ b/include/linux/pageblock-flags.h
@@ -0,0 +1,75 @@
+/*
+ * Macros for manipulating and testing flags related to a
+ * pageblock_nr_pages number of pages.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Original author, Mel Gorman
+ * Major cleanups and reduction of bit operations, Andy Whitcroft
+ */
+#ifndef PAGEBLOCK_FLAGS_H
+#define PAGEBLOCK_FLAGS_H
+
+#include <linux/types.h>
+
+/* Macro to aid the definition of ranges of bits */
+#define PB_range(name, required_bits) \
+ name, name ## _end = (name + required_bits) - 1
+
+/* Bit indices that affect a whole block of pages */
+enum pageblock_bits {
+ PB_range(PB_migrate, 3), /* 3 bits required for migrate types */
+ NR_PAGEBLOCK_BITS
+};
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Huge page sizes are variable */
+extern int pageblock_order;
+
+#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+/* Huge pages are a constant size */
+#define pageblock_order HUGETLB_PAGE_ORDER
+
+#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+#else /* CONFIG_HUGETLB_PAGE */
+
+/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
+#define pageblock_order (MAX_ORDER-1)
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#define pageblock_nr_pages (1UL << pageblock_order)
+
+/* Forward declaration */
+struct page;
+
+/* Declarations for getting and setting flags. See mm/page_alloc.c */
+unsigned long get_pageblock_flags_group(struct page *page,
+ int start_bitidx, int end_bitidx);
+void set_pageblock_flags_group(struct page *page, unsigned long flags,
+ int start_bitidx, int end_bitidx);
+
+#define get_pageblock_flags(page) \
+ get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+#define set_pageblock_flags(page) \
+ set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1)
+
+#endif /* PAGEBLOCK_FLAGS_H */
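
The migratetype accessors are the primary consumers of these bit helpers; get_pageblock_migratetype() (added to mmzone.h in this series) reads them, and a setter along these lines stores a type back:

/* Sketch of the setter counterpart (cf. set_pageblock_migratetype()
 * in mm/page_alloc.c). */
static void sketch_set_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
				  PB_migrate, PB_migrate_end);
}
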
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8a83537d697..db8a410ae9e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -84,11 +84,11 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
typedef int filler_t(void *, struct page *);
extern struct page * find_get_page(struct address_space *mapping,
- unsigned long index);
+ pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
- unsigned long index);
+ pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
- unsigned long index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
@@ -96,44 +96,47 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages);
+struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index);
+
/*
* Returns locked page at given index in given cache, creating it if needed.
*/
-static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
+static inline struct page *grab_cache_page(struct address_space *mapping,
+ pgoff_t index)
{
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
- unsigned long index);
+ pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
- unsigned long index, filler_t *filler,
+ pgoff_t index, filler_t *filler,
void *data);
extern struct page * read_cache_page(struct address_space *mapping,
- unsigned long index, filler_t *filler,
+ pgoff_t index, filler_t *filler,
void *data);
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page_async(
struct address_space *mapping,
- unsigned long index, void *data)
+ pgoff_t index, void *data)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return read_cache_page_async(mapping, index, filler, data);
}
static inline struct page *read_mapping_page(struct address_space *mapping,
- unsigned long index, void *data)
+ pgoff_t index, void *data)
{
filler_t *filler = (filler_t *)mapping->a_ops->readpage;
return read_cache_page(mapping, index, filler, data);
}
int add_to_page_cache(struct page *page, struct address_space *mapping,
- unsigned long index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- unsigned long index, gfp_t gfp_mask);
+ pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
@@ -218,6 +221,9 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
int ret;
+ if (unlikely(size == 0))
+ return 0;
+
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
@@ -237,19 +243,23 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size)
return ret;
}
-static inline void fault_in_pages_readable(const char __user *uaddr, int size)
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
volatile char c;
int ret;
+ if (unlikely(size == 0))
+ return 0;
+
ret = __get_user(c, uaddr);
if (ret == 0) {
const char __user *end = uaddr + size - 1;
if (((unsigned long)uaddr & PAGE_MASK) !=
((unsigned long)end & PAGE_MASK))
- __get_user(c, end);
+ ret = __get_user(c, end);
}
+ return ret;
}
#endif /* _LINUX_PAGEMAP_H */
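
With fault_in_pages_readable() now returning a status, write paths can pre-fault and bail out early; a hedged sketch:

/* Hypothetical write path: fault the user buffer in before taking
 * page locks, so a later atomic copy cannot deadlock on a fault. */
static ssize_t sketch_write(struct file *file, const char __user *buf,
			size_t bytes)
{
	if (fault_in_pages_readable(buf, bytes))
		return -EFAULT;
	/* ... lock page, copy with pagefaults disabled, unlock ... */
	return bytes;
}
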
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2c49561f9b4..df948b44eda 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1995,6 +1995,8 @@
#define PCI_VENDOR_ID_TOPIC 0x151f
#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
+#define PCI_VENDOR_ID_MAINPINE 0x1522
+#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
#define PCI_VENDOR_ID_ENE 0x1524
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
@@ -2324,6 +2326,8 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031
#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index f9e77d2ee32..b6116b4445c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -26,28 +26,31 @@
#include <linux/rcupdate.h>
/*
- * A direct pointer (root->rnode pointing directly to a data item,
- * rather than another radix_tree_node) is signalled by the low bit
- * set in the root->rnode pointer.
+ * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
+ * than a data item) is signalled by the low bit set in the root->rnode
+ * pointer.
*
- * In this case root->height is also NULL, but the direct pointer tests are
- * needed for RCU lookups when root->height is unreliable.
+ * In this case root->height is > 0, but the indirect pointer tests are
+ * needed for RCU lookups (because root->height is unreliable). The only
+ * time callers need worry about this is when doing a lookup_slot under
+ * RCU.
*/
-#define RADIX_TREE_DIRECT_PTR 1
+#define RADIX_TREE_INDIRECT_PTR 1
+#define RADIX_TREE_RETRY ((void *)-1UL)
-static inline void *radix_tree_ptr_to_direct(void *ptr)
+static inline void *radix_tree_ptr_to_indirect(void *ptr)
{
- return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
+ return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
}
-static inline void *radix_tree_direct_to_ptr(void *ptr)
+static inline void *radix_tree_indirect_to_ptr(void *ptr)
{
- return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
+ return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
}
-static inline int radix_tree_is_direct_ptr(void *ptr)
+static inline int radix_tree_is_indirect_ptr(void *ptr)
{
- return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
+ return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}
/*** radix-tree API starts here ***/
@@ -130,7 +133,10 @@ do { \
*/
static inline void *radix_tree_deref_slot(void **pslot)
{
- return radix_tree_direct_to_ptr(*pslot);
+ void *ret = *pslot;
+ if (unlikely(radix_tree_is_indirect_ptr(ret)))
+ ret = RADIX_TREE_RETRY;
+ return ret;
}
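
A lockless reader has to handle the RADIX_TREE_RETRY cookie. A minimal sketch, assuming the caller pins the item before leaving the RCU read side:

/*
 * Sketch of an RCU-side lookup honouring RADIX_TREE_RETRY. If the slot
 * dereferences to the retry cookie, the tree is being expanded or
 * shrunk and the lookup must be restarted.
 */
static void *example_rcu_lookup(struct radix_tree_root *root,
				unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
	do {
		slot = radix_tree_lookup_slot(root, index);
		item = slot ? radix_tree_deref_slot(slot) : NULL;
	} while (unlikely(item == RADIX_TREE_RETRY));
	/* real callers pin 'item' (e.g. a page refcount) before unlock */
	rcu_read_unlock();
	return item;
}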
/**
* radix_tree_replace_slot - replace item in a slot
@@ -142,10 +148,8 @@ static inline void *radix_tree_deref_slot(void **pslot)
*/
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
- BUG_ON(radix_tree_is_direct_ptr(item));
- rcu_assign_pointer(*pslot,
- (void *)((unsigned long)item |
- ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
+ BUG_ON(radix_tree_is_indirect_ptr(item));
+ rcu_assign_pointer(*pslot, item);
}
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
@@ -155,6 +159,8 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
+unsigned long radix_tree_next_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
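
radix_tree_next_hole() is new in this patch; a hypothetical helper shows the intended page-cache use (a stable answer still needs mapping->tree_lock or equivalent care):

/*
 * Hypothetical helper: return the index of the first absent page at or
 * after 'index', scanning at most 'max_scan' slots. Against a file's
 * page tree this answers "where does the contiguous cached run end?".
 */
static pgoff_t example_first_hole(struct address_space *mapping,
				  pgoff_t index, unsigned long max_scan)
{
	return radix_tree_next_hole(&mapping->page_tree, index, max_scan);
}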
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 228e0a8ce24..592e3a55f81 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,8 +1,6 @@
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
-#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
-
/*
* cloning flags:
*/
@@ -58,12 +56,12 @@ struct sched_param {
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
+#include <linux/mm_types.h>
#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
-#include <asm/mmu.h>
#include <asm/cputime.h>
#include <linux/smp.h>
@@ -319,7 +317,6 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
@@ -331,7 +328,6 @@ typedef atomic_long_t mm_counter_t;
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
-typedef unsigned long mm_counter_t;
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
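
Usage of the counter accessors is unchanged by the move to mm_types.h; an illustrative helper:

/*
 * Illustration only: the same accessors compile to atomic_long ops or
 * plain arithmetic depending on CONFIG_SPLIT_PTLOCK_CPUS.
 */
static void example_account_rss(struct mm_struct *mm)
{
	inc_mm_counter(mm, anon_rss);	/* one more anonymous page mapped */
	update_hiwater_rss(mm);		/* track the RSS high-water mark */
}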
@@ -368,74 +364,6 @@ extern int get_dumpable(struct mm_struct *mm);
#define MMF_DUMP_FILTER_DEFAULT \
((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
-struct mm_struct {
- struct vm_area_struct * mmap; /* list of VMAs */
- struct rb_root mm_rb;
- struct vm_area_struct * mmap_cache; /* last find_vma result */
- unsigned long (*get_unmapped_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
- void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
- unsigned long mmap_base; /* base of mmap area */
- unsigned long task_size; /* size of task vm space */
- unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
- unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
- pgd_t * pgd;
- atomic_t mm_users; /* How many users with user space? */
- atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
- int map_count; /* number of VMAs */
- struct rw_semaphore mmap_sem;
- spinlock_t page_table_lock; /* Protects page tables and some counters */
-
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
-
- /* Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- mm_counter_t _file_rss;
- mm_counter_t _anon_rss;
-
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
-
- unsigned long total_vm, locked_vm, shared_vm, exec_vm;
- unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
-
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
-
- cpumask_t cpu_vm_mask;
-
- /* Architecture-specific MM context */
- mm_context_t context;
-
- /* Swap token stuff */
- /*
- * Last value of global fault stamp as seen by this process.
- * In other words, this value gives an indication of how long
- * it has been since this task got the token.
- * Look at mm/thrash.c
- */
- unsigned int faultstamp;
- unsigned int token_priority;
- unsigned int last_interval;
-
- unsigned long flags; /* Must use atomic bitops to access the bits */
-
- /* coredumping support */
- int core_waiters;
- struct completion *core_startup_done, core_done;
-
- /* aio bits */
- rwlock_t ioctx_list_lock;
- struct kioctx *ioctx_list;
-};
-
struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
@@ -801,9 +729,6 @@ struct sched_domain {
#endif
};
-extern int partition_sched_domains(cpumask_t *partition1,
- cpumask_t *partition2);
-
#endif /* CONFIG_SMP */
/*
diff --git a/include/linux/selection.h b/include/linux/selection.h
index f9457861937..8cdaa1151d2 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -13,6 +13,7 @@
struct tty_struct;
extern struct vc_data *sel_cons;
+struct tty_struct;
extern void clear_selection(void);
extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 09d17b06bf0..4db77249281 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -291,7 +291,8 @@ struct uart_port {
resource_size_t mapbase; /* for ioremap */
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
- unsigned char unused[3];
+ unsigned char suspended;
+ unsigned char unused[2];
void *private_data; /* generic platform data pointer */
};
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d859354b9e5..3a5bad3ad12 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,12 +24,14 @@
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+/* The following flags affect how the page allocator groups pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
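
A hypothetical cache creation using the new alias (five-argument kmem_cache_create() as in this tree, no constructor):

struct example_req {			/* hypothetical short-lived object */
	unsigned long token;
};

static struct kmem_cache *example_req_cache;

static int __init example_req_init(void)
{
	example_req_cache = kmem_cache_create("example_req",
					      sizeof(struct example_req), 0,
					      SLAB_TEMPORARY, NULL);
	return example_req_cache ? 0 : -ENOMEM;
}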
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 74962077f63..d65159d1d4f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,6 +11,14 @@
#include <linux/workqueue.h>
#include <linux/kobject.h>
+struct kmem_cache_cpu {
+ void **freelist; /* Pointer to the next free object */
+ struct page *page; /* Slab page we are allocating from */
+ int node; /* Node of that page */
+ unsigned int offset; /* Free-pointer offset, in words */
+ unsigned int objsize; /* Object size, cached for the fast path */
+};
+
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
@@ -54,7 +62,11 @@ struct kmem_cache {
int defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#endif
- struct page *cpu_slab[NR_CPUS];
+#ifdef CONFIG_SMP
+ struct kmem_cache_cpu *cpu_slab[NR_CPUS];
+#else
+ struct kmem_cache_cpu cpu_slab;
+#endif
};
/*
@@ -72,7 +84,7 @@ struct kmem_cache {
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -83,9 +95,6 @@ static __always_inline int kmalloc_index(size_t size)
if (!size)
return 0;
- if (size > KMALLOC_MAX_SIZE)
- return -1;
-
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
@@ -102,6 +111,10 @@ static __always_inline int kmalloc_index(size_t size)
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
@@ -109,13 +122,9 @@ static __always_inline int kmalloc_index(size_t size)
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
- if (size <= 512 * 1024) return 19;
+ if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
return -1;
/*
@@ -140,19 +149,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
if (index == 0)
return NULL;
- /*
- * This function only gets expanded if __builtin_constant_p(size), so
- * testing it here shouldn't be needed. But some versions of gcc need
- * help.
- */
- if (__builtin_constant_p(size) && index < 0) {
- /*
- * Generate a link failure. Would be great if we could
- * do something to stop the compile here.
- */
- extern void __kmalloc_size_too_large(void);
- __kmalloc_size_too_large();
- }
return &kmalloc_caches[index];
}
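
Some worked values of the mapping above, assuming the standard power-of-two ladder and 4k pages; note that kmalloc_caches[PAGE_SHIFT] leaves exactly enough slots for indices up to PAGE_SHIFT - 1:

/*
 * Worked examples (standard size ladder, 4k pages assumed):
 *	kmalloc_index(100)      ->  7	(rounded up to the 128-byte cache)
 *	kmalloc_index(2 * 1024) -> 11	(largest slab-backed size here)
 *	kmalloc_index(4 * 1024) -> 12	(reachable only on >4k-page arches)
 *	kmalloc_index(4 << 20)  -> -1	(the former 4MB..32MB caches are
 *					 gone; such sizes now come from the
 *					 page allocator via kmalloc())
 */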
@@ -168,15 +164,21 @@ void *__kmalloc(size_t size, gfp_t flags);
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
+ if (__builtin_constant_p(size)) {
+ if (size > PAGE_SIZE / 2)
+ return (void *)__get_free_pages(flags | __GFP_COMP,
+ get_order(size));
- if (!s)
- return ZERO_SIZE_PTR;
+ if (!(flags & SLUB_DMA)) {
+ struct kmem_cache *s = kmalloc_slab(size);
- return kmem_cache_alloc(s, flags);
- } else
- return __kmalloc(size, flags);
+ if (!s)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc(s, flags);
+ }
+ }
+ return __kmalloc(size, flags);
}
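
Illustrative call sites (not from the patch) showing how the constant-size dispatch now resolves on 4k pages:

static void example_kmalloc_users(void)
{
	void *a = kmalloc(64, GFP_KERNEL);   /* kmem_cache_alloc() on the
					      * 64-byte kmalloc cache */
	void *b = kmalloc(8192, GFP_KERNEL); /* > PAGE_SIZE/2: straight to
					      * __get_free_pages(__GFP_COMP) */
	void *c = kmalloc(64, GFP_DMA);      /* SLUB_DMA set: __kmalloc() */

	kfree(a);
	kfree(b);	/* kfree() copes with compound pages too */
	kfree(c);
}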
#ifdef CONFIG_NUMA
@@ -185,15 +187,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
- struct kmem_cache *s = kmalloc_slab(size);
+ if (__builtin_constant_p(size) &&
+ size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+ struct kmem_cache *s = kmalloc_slab(size);
if (!s)
return ZERO_SIZE_PTR;
return kmem_cache_alloc_node(s, flags, node);
- } else
- return __kmalloc_node(size, flags, node);
+ }
+ return __kmalloc_node(size, flags, node);
}
#endif
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index 014e73b31fc..df7620dd8f3 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -15,6 +15,24 @@
/* config 1 */
#define SM501_SYSTEM_CONTROL (0x000000)
+
+#define SM501_SYSCTRL_PANEL_TRISTATE (1<<0)
+#define SM501_SYSCTRL_MEM_TRISTATE (1<<1)
+#define SM501_SYSCTRL_CRT_TRISTATE (1<<2)
+
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3<<4)
+
+#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1<<6)
+#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1<<7)
+#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
+#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+
+/* miscellaneous control */
+
#define SM501_MISC_CONTROL (0x000004)
#define SM501_MISC_BUS_SH (0x0)
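
A hypothetical sequence driving the new bitfields (readl/writel stand in for whatever accessors the driver uses):

/*
 * Select 4-beat PCI slave bursts without disturbing the other
 * SYSTEM_CONTROL bits.
 */
static void example_set_pci_burst(void __iomem *regs)
{
	unsigned long ctrl = readl(regs + SM501_SYSTEM_CONTROL);

	ctrl &= ~SM501_SYSCTRL_PCI_SLAVE_BURST_MASK;
	ctrl |= SM501_SYSCTRL_PCI_SLAVE_BURST_4;
	writel(ctrl, regs + SM501_SYSTEM_CONTROL);
}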
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 002a3cddbdd..387e428f1cd 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -195,7 +195,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
/**
* struct spi_master - interface to SPI master controller
- * @cdev: class interface to this driver
+ * @dev: device interface to this driver
* @bus_num: board-specific (and often SOC-specific) identifier for a
* given SPI controller.
* @num_chipselect: chipselects are used to distinguish individual
@@ -222,7 +222,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* message's completion function when the transaction completes.
*/
struct spi_master {
- struct class_device cdev;
+ struct device dev;
/* other than negative (== assign one dynamically), bus_num is fully
* board-specific. usually that simplifies to being SOC-specific.
@@ -268,17 +268,17 @@ struct spi_master {
static inline void *spi_master_get_devdata(struct spi_master *master)
{
- return class_get_devdata(&master->cdev);
+ return dev_get_drvdata(&master->dev);
}
static inline void spi_master_set_devdata(struct spi_master *master, void *data)
{
- class_set_devdata(&master->cdev, data);
+ dev_set_drvdata(&master->dev, data);
}
static inline struct spi_master *spi_master_get(struct spi_master *master)
{
- if (!master || !class_device_get(&master->cdev))
+ if (!master || !get_device(&master->dev))
return NULL;
return master;
}
@@ -286,7 +286,7 @@ static inline struct spi_master *spi_master_get(struct spi_master *master)
static inline void spi_master_put(struct spi_master *master)
{
if (master)
- class_device_put(&master->cdev);
+ put_device(&master->dev);
}
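
With the embedded struct device, reference counting follows the driver model; a sketch with an illustrative helper:

/*
 * spi_master_get() now pins the underlying device, spi_master_put()
 * releases it, and drvdata replaces the old class_device data.
 */
static int example_borrow_master(struct spi_master *master)
{
	void *priv;

	master = spi_master_get(master);	/* take a reference */
	if (!master)
		return -ENODEV;

	priv = spi_master_get_devdata(master);	/* dev_get_drvdata() inside */
	/* ... use the controller state in priv ... */

	spi_master_put(master);			/* drop the reference */
	return 0;
}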
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 46705e91573..c1527c2ef3c 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -481,7 +481,7 @@ static inline void *get_gadget_data (struct usb_gadget *gadget)
/**
* gadget_is_dualspeed - return true iff the hardware handles high speed
- * @gadget: controller that might support both high and full speeds
+ * @g: controller that might support both high and full speeds
*/
static inline int gadget_is_dualspeed(struct usb_gadget *g)
{
@@ -497,7 +497,7 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g)
/**
* gadget_is_otg - return true iff the hardware is OTG-ready
- * @gadget: controller that might have a Mini-AB connector
+ * @g: controller that might have a Mini-AB connector
*
* This is a runtime test, since kernels with a USB-OTG stack sometimes
* run on boards which only have a Mini-B (or Mini-A) connector.