Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpiosxf.h           |  1
-rw-r--r--  include/acpi/acpixf.h             |  4
-rw-r--r--  include/acpi/pdc_intel.h          |  2
-rw-r--r--  include/asm-generic/percpu.h      | 52
-rw-r--r--  include/asm-generic/sections.h    |  2
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 55
-rw-r--r--  include/crypto/hash.h             |  2
-rw-r--r--  include/drm/i915_drm.h            |  2
-rw-r--r--  include/linux/Kbuild              |  3
-rw-r--r--  include/linux/acpi.h              |  1
-rw-r--r--  include/linux/async.h             |  8
-rw-r--r--  include/linux/ata.h               | 15
-rw-r--r--  include/linux/bio.h               | 55
-rw-r--r--  include/linux/blkdev.h            |  8
-rw-r--r--  include/linux/cgroup.h            |  1
-rw-r--r--  include/linux/clockchips.h        |  1
-rw-r--r--  include/linux/crypto.h            |  7
-rw-r--r--  include/linux/dmaengine.h         | 12
-rw-r--r--  include/linux/elfcore.h           |  9
-rw-r--r--  include/linux/fb.h                | 15
-rw-r--r--  include/linux/hugetlb.h           | 11
-rw-r--r--  include/linux/if_tunnel.h         |  3
-rw-r--r--  include/linux/init_task.h         | 11
-rw-r--r--  include/linux/interrupt.h         |  1
-rw-r--r--  include/linux/irq.h               | 86
-rw-r--r--  include/linux/irqnr.h             |  1
-rw-r--r--  include/linux/jbd2.h              |  3
-rw-r--r--  include/linux/kernel.h            |  3
-rw-r--r--  include/linux/libata.h            | 19
-rw-r--r--  include/linux/magic.h             |  1
-rw-r--r--  include/linux/mm.h                |  4
-rw-r--r--  include/linux/module.h            | 26
-rw-r--r--  include/linux/pci.h               |  2
-rw-r--r--  include/linux/pci_ids.h           |  1
-rw-r--r--  include/linux/percpu.h            | 47
-rw-r--r--  include/linux/sched.h             | 79
-rw-r--r--  include/linux/slab_def.h          | 10
-rw-r--r--  include/linux/smp.h               |  6
-rw-r--r--  include/linux/spinlock.h          |  5
-rw-r--r--  include/linux/stackprotector.h    | 16
-rw-r--r--  include/linux/syscalls.h          | 28
-rw-r--r--  include/linux/topology.h          |  6
-rw-r--r--  include/linux/wait.h              | 11
-rw-r--r--  include/media/v4l2-device.h       |  8
-rw-r--r--  include/net/inet_hashtables.h     |  2
-rw-r--r--  include/video/aty128.h            |  4
-rw-r--r--  include/video/mach64.h            | 24
-rw-r--r--  include/video/radeon.h            | 18
48 files changed, 516 insertions, 175 deletions
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index a62720a7edc..ab0b85cf21f 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -144,6 +144,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address where,
acpi_size length);
void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
#ifdef ACPI_FUTURE_USAGE
acpi_status
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c8e8cf45830..cc40102fe2f 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -130,6 +130,10 @@ acpi_get_table_header(acpi_string signature,
struct acpi_table_header *out_table_header);
acpi_status
+acpi_get_table_with_size(acpi_string signature,
+ u32 instance, struct acpi_table_header **out_table,
+ acpi_size *tbl_size);
+acpi_status
acpi_get_table(acpi_string signature,
u32 instance, struct acpi_table_header **out_table);
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
index e72bfdd887f..552637b0d05 100644
--- a/include/acpi/pdc_intel.h
+++ b/include/acpi/pdc_intel.h
@@ -14,6 +14,7 @@
#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
#define ACPI_PDC_C_C1_FFH (0x0100)
#define ACPI_PDC_C_C2C3_FFH (0x0200)
+#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT | \
@@ -22,6 +23,7 @@
#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT | \
ACPI_PDC_SMP_P_SWCOORD | \
+ ACPI_PDC_SMP_P_HWCOORD | \
ACPI_PDC_P_FFH)
#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b0e63c672eb..00f45ff081a 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
__typeof__(type) per_cpu_var(name)
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long. percpu_read() evaluates to an lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var(). Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp_var__; \
+ __tmp_var__ = get_cpu_var(var); \
+ put_cpu_var(var); \
+ __tmp_var__; \
+ })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+ get_cpu_var(var) op val; \
+ put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#endif
+
#endif /* _ASM_GENERIC_PERCPU_H_ */
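As a quick usage sketch of the accessors above (the variable name is illustrative, not part of this patch), a DEFINE_PER_CPU counter can be updated and read without an explicit get_cpu_var()/put_cpu_var() pair:

    #include <linux/percpu.h>

    /* illustrative per-cpu variable, not from this patch */
    DEFINE_PER_CPU(unsigned long, nr_events);

    static void count_event(void)
    {
            percpu_add(nr_events, 1);       /* preemption-safe increment */
    }

    static unsigned long snapshot_events(void)
    {
            return percpu_read(nr_events);  /* preemption-safe read of this CPU's copy */
    }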
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 79a7ff925bf..4ce48e87853 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f..5406e70aba8 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -430,12 +430,59 @@
*(.initcall7.init) \
*(.initcall7s.init)
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area. If @vaddr
+ * is not blank, it specifies explicit base address and all percpu
+ * symbols will be offset from the given address. If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank. Be warned that
+ * output PHDR is sticky. If @phdr is specified, the next output
+ * section in the linker script will go there too. @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr) \
+ VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+ - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ *(.data.percpu.first) \
+ *(.data.percpu.page_aligned) \
+ *(.data.percpu) \
+ *(.data.percpu.shared_aligned) \
+ VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ } phdr \
+ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Align to @align and output the output section for the percpu area. This
+ * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
+ */
#define PERCPU(align) \
. = ALIGN(align); \
- VMLINUX_SYMBOL(__per_cpu_start) = .; \
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__per_cpu_load) = .; \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ *(.data.percpu.first) \
*(.data.percpu.page_aligned) \
*(.data.percpu) \
*(.data.percpu.shared_aligned) \
- } \
- VMLINUX_SYMBOL(__per_cpu_end) = .;
+ VMLINUX_SYMBOL(__per_cpu_end) = .; \
+ }
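For orientation, the __per_cpu_load/__per_cpu_start/__per_cpu_end symbols emitted here are the same ones declared in asm-generic/sections.h above; a minimal sketch (illustrative helper, not code from this patch) of how boot code can size and copy the static per-cpu template:

    #include <linux/string.h>
    #include <asm-generic/sections.h>

    /* hypothetical helper: copy the linker-placed per-cpu template into a chunk */
    static void copy_percpu_template(void *chunk)
    {
            size_t static_size = __per_cpu_end - __per_cpu_start;

            /* __per_cpu_load is where the initialized per-cpu data sits in the image */
            memcpy(chunk, __per_cpu_load, static_size);
    }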
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index cd16d6e668c..d797e119e3d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -222,7 +222,7 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
static inline void crypto_free_shash(struct crypto_shash *tfm)
{
- crypto_free_tfm(crypto_shash_tfm(tfm));
+ crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}
static inline unsigned int crypto_shash_alignmask(
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b3bcf72dc65..912cd52db96 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -261,6 +261,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
+#define I915_PARAM_NUM_FENCES_AVAIL 6
typedef struct drm_i915_getparam {
int param;
@@ -272,6 +273,7 @@ typedef struct drm_i915_getparam {
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
+#define I915_SETPARAM_NUM_USED_FENCES 4
typedef struct drm_i915_setparam {
int param;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 12e9a2957ca..b97cdc516a8 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -41,6 +41,7 @@ header-y += baycom.h
header-y += bfs_fs.h
header-y += blkpg.h
header-y += bpqether.h
+header-y += bsg.h
header-y += can.h
header-y += cdk.h
header-y += chio.h
@@ -89,7 +90,6 @@ header-y += if_ppp.h
header-y += if_slip.h
header-y += if_strip.h
header-y += if_tun.h
-header-y += if_tunnel.h
header-y += in_route.h
header-y += ioctl.h
header-y += ip6_tunnel.h
@@ -235,6 +235,7 @@ unifdef-y += if_phonet.h
unifdef-y += if_pppol2tp.h
unifdef-y += if_pppox.h
unifdef-y += if_tr.h
+unifdef-y += if_tunnel.h
unifdef-y += if_vlan.h
unifdef-y += igmp.h
unifdef-y += inet_diag.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6fce2fc2d12..d59f0fa4d77 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __init __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
diff --git a/include/linux/async.h b/include/linux/async.h
index c4ecacd0b32..68a9530196f 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -17,9 +17,11 @@ typedef u64 async_cookie_t;
typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
-extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list);
+extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
+ struct list_head *list);
extern void async_synchronize_full(void);
-extern void async_synchronize_full_special(struct list_head *list);
+extern void async_synchronize_full_domain(struct list_head *list);
extern void async_synchronize_cookie(async_cookie_t cookie);
-extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list);
+extern void async_synchronize_cookie_domain(async_cookie_t cookie,
+ struct list_head *list);
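A hedged usage sketch of the renamed domain API (the domain list, callback, and device array are illustrative):

    #include <linux/list.h>
    #include <linux/async.h>

    static LIST_HEAD(my_probe_domain);      /* hypothetical per-driver domain */

    static void my_probe_one(void *data, async_cookie_t cookie)
    {
            /* slow per-device probing runs asynchronously */
    }

    static void my_probe_all(void **devs, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    async_schedule_domain(my_probe_one, devs[i], &my_probe_domain);

            /* wait only for work queued in this domain, not all async work */
            async_synchronize_full_domain(&my_probe_domain);
    }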
diff --git a/include/linux/ata.h b/include/linux/ata.h
index a53318b8cbd..08a86d5cdf1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -731,12 +731,17 @@ static inline int ata_id_current_chs_valid(const u16 *id)
static inline int ata_id_is_cfa(const u16 *id)
{
- if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */
+ if (id[ATA_ID_CONFIG] == 0x848A) /* Traditional CF */
return 1;
- /* Could be CF hiding as standard ATA */
- if (ata_id_major_version(id) >= 3 &&
- id[ATA_ID_COMMAND_SET_1] != 0xFFFF &&
- (id[ATA_ID_COMMAND_SET_1] & (1 << 2)))
+ /*
+ * CF specs don't require specific value in the word 0 anymore and yet
+ * they forbid reporting the ATA version in word 80 and require the
+ * CFA feature set support to be indicated in word 83 in this case.
+ * Unfortunately, some cards only follow one of these requirements,
+ * and while those that don't indicate CFA feature support need some
+ * sort of quirk list, it seems impractical for the ones that do...
+ */
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004)
return 1;
return 0;
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 18462c5b8ff..2aa283ab062 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -144,7 +144,7 @@ struct bio {
* bit 1 -- rw-ahead when set
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
- * submitted IO to be completed before this oen is issued.
+ * submitted IO to be completed before this one is issued.
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
@@ -163,12 +163,33 @@ struct bio {
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
-#define BIO_RW_SYNC 3
-#define BIO_RW_META 4
-#define BIO_RW_DISCARD 5
-#define BIO_RW_FAILFAST_DEV 6
-#define BIO_RW_FAILFAST_TRANSPORT 7
-#define BIO_RW_FAILFAST_DRIVER 8
+#define BIO_RW_SYNCIO 3
+#define BIO_RW_UNPLUG 4
+#define BIO_RW_META 5
+#define BIO_RW_DISCARD 6
+#define BIO_RW_FAILFAST_DEV 7
+#define BIO_RW_FAILFAST_TRANSPORT 8
+#define BIO_RW_FAILFAST_DRIVER 9
+
+#define BIO_RW_SYNC (BIO_RW_SYNCIO | BIO_RW_UNPLUG)
+
+#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
+
+/*
+ * Old defines, these should eventually be replaced by direct usage of
+ * bio_rw_flagged()
+ */
+#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER)
+#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO)
+#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG)
+#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
+#define bio_failfast_transport(bio) \
+ bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
+#define bio_failfast_driver(bio) \
+ bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
+#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
+#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
+#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
/*
* upper 16 bits of bi_rw define the io priority of this bio
@@ -193,15 +214,6 @@ struct bio {
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
-#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
-#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
-#define bio_failfast_transport(bio) \
- ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
-#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
-#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
-#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio)
@@ -312,7 +324,6 @@ struct bio_integrity_payload {
void *bip_buf; /* generated integrity data */
bio_end_io_t *bip_end_io; /* saved I/O completion fn */
- int bip_error; /* saved I/O error */
unsigned int bip_size;
unsigned short bip_pool; /* pool the ivec came from */
@@ -440,12 +451,13 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
#ifdef CONFIG_HIGHMEM
/*
- * remember to add offset! and never ever reenable interrupts between a
- * bvec_kmap_irq and bvec_kunmap_irq!!
+ * remember never ever reenable interrupts between a bvec_kmap_irq and
+ * bvec_kunmap_irq!
*
* This function MUST be inlined - it plays with the CPU interrupt flags.
*/
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
+ unsigned long *flags)
{
unsigned long addr;
@@ -461,7 +473,8 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
return (char *) addr + bvec->bv_offset;
}
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static __always_inline void bvec_kunmap_irq(char *buffer,
+ unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
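A small sketch of how a submitter would use the renumbered flags and the new bio_rw_flagged() helper (the functions are illustrative; bio and bi_rw are the existing fields):

    /* mark a bio as synchronous I/O and request an immediate unplug */
    static void example_mark_sync(struct bio *bio)
    {
            bio->bi_rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
    }

    /* readers can test either through the helper or the legacy wrappers */
    static int example_is_sync(struct bio *bio)
    {
            return bio_rw_flagged(bio, BIO_RW_SYNCIO);      /* same as bio_sync(bio) */
    }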
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 044467ef7b1..dcaa0fd84b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,6 +108,7 @@ enum rq_flag_bits {
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
+ __REQ_UNPLUG, /* unplug queue on submission */
__REQ_NR_BITS, /* stops here */
};
@@ -134,6 +135,7 @@ enum rq_flag_bits {
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
+#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define BLK_MAX_CDB 16
@@ -449,6 +451,11 @@ struct request_queue
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_CLUSTER) | \
+ (1 << QUEUE_FLAG_STACKABLE))
static inline int queue_is_locked(struct request_queue *q)
{
@@ -565,6 +572,7 @@ enum {
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e4e8e117d27..499900d0cee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -378,6 +378,7 @@ struct cgroup_subsys {
* - initiating hotplug events
*/
struct mutex hierarchy_mutex;
+ struct lock_class_key subsys_key;
/*
* Link to parent, and list entry in parent's children.
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index cea153697ec..3a1dbba4d3a 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -36,6 +36,7 @@ enum clock_event_nofitiers {
CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
CLOCK_EVT_NOTIFY_SUSPEND,
CLOCK_EVT_NOTIFY_RESUME,
+ CLOCK_EVT_NOTIFY_CPU_DYING,
CLOCK_EVT_NOTIFY_CPU_DEAD,
};
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3bacd71509f..1f2e9020acc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -552,7 +552,12 @@ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
-void crypto_free_tfm(struct crypto_tfm *tfm);
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
+
+static inline void crypto_free_tfm(struct crypto_tfm *tfm)
+{
+ return crypto_destroy_tfm(tfm, tfm);
+}
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 3e0f64c335c..3e68469c188 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -282,6 +282,18 @@ static inline void dmaengine_put(void)
}
#endif
+#ifdef CONFIG_NET_DMA
+#define net_dmaengine_get() dmaengine_get()
+#define net_dmaengine_put() dmaengine_put()
+#else
+static inline void net_dmaengine_get(void)
+{
+}
+static inline void net_dmaengine_put(void)
+{
+}
+#endif
+
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 5ca54d77079..7605c5e9589 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+ ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+ elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#ifdef ELF_CORE_COPY_TASK_REGS
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 818fe21257e..31527e17076 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -960,6 +960,21 @@ extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern struct class *fb_class;
+static inline int lock_fb_info(struct fb_info *info)
+{
+ mutex_lock(&info->lock);
+ if (!info->fbops) {
+ mutex_unlock(&info->lock);
+ return 0;
+ }
+ return 1;
+}
+
+static inline void unlock_fb_info(struct fb_info *info)
+{
+ mutex_unlock(&info->lock);
+}
+
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height)
{
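A hedged sketch of how callers are expected to bracket fbops access with the new helpers (the surrounding function is illustrative):

    static int example_fb_op(struct fb_info *info)
    {
            if (!lock_fb_info(info))
                    return -ENODEV;         /* fbops gone: device was unregistered */

            /* ... safely dereference info->fbops here ... */

            unlock_fb_info(info);
            return 0;
    }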
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f1d2fba19ea..03be7f29ca0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,7 +33,8 @@ unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
- struct vm_area_struct *vma);
+ struct vm_area_struct *vma,
+ int acctflags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long hugepages_treat_as_movable;
@@ -138,7 +139,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t);
+struct file *hugetlb_file_setup(const char *name, size_t, int);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -158,9 +159,9 @@ static inline void set_file_hugepages(struct file *file)
}
#else /* !CONFIG_HUGETLBFS */
-#define is_file_hugepages(file) 0
-#define set_file_hugepages(file) BUG()
-#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)
+#define is_file_hugepages(file) 0
+#define set_file_hugepages(file) BUG()
+#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
#endif /* !CONFIG_HUGETLBFS */
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index aeab2cb32a9..82c43624c06 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -2,7 +2,10 @@
#define _IF_TUNNEL_H_
#include <linux/types.h>
+
+#ifdef __KERNEL__
#include <linux/ip.h>
+#endif
#define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0)
#define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index ea0ea1a4c36..e752d973fa2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -48,12 +48,11 @@ extern struct fs_struct init_fs;
.posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
.cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
.rlim = INIT_RLIMITS, \
- .cputime = { .totals = { \
- .utime = cputime_zero, \
- .stime = cputime_zero, \
- .sum_exec_runtime = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(sig.cputime.totals.lock), \
- }, }, \
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+ .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
}
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9127f6b51a3..472f11765f6 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;
extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f18..27a67536511 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -182,11 +182,11 @@ struct irq_desc {
unsigned int irqs_unhandled;
spinlock_t lock;
#ifdef CONFIG_SMP
- cpumask_t affinity;
+ cpumask_var_t affinity;
unsigned int cpu;
-#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
- cpumask_t pending_mask;
+ cpumask_var_t pending_mask;
+#endif
#endif
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
#endif /* !CONFIG_S390 */
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc: pointer to irq_desc struct
+ * @cpu: cpu which will be handling the cpumasks
+ * @boot: true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+ bool boot)
+{
+ int node;
+
+ if (boot) {
+ alloc_bootmem_cpumask_var(&desc->affinity);
+ cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ alloc_bootmem_cpumask_var(&desc->pending_mask);
+ cpumask_clear(desc->pending_mask);
+#endif
+ return true;
+ }
+
+ node = cpu_to_node(cpu);
+
+ if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+ return false;
+ cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+ free_cpumask_var(desc->affinity);
+ return false;
+ }
+ cpumask_clear(desc->pending_mask);
+#endif
+ return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc: pointer to old irq_desc struct
+ * @new_desc: pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASKS_OFFSTACK
+ cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+ bool boot)
+{
+ return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
#endif /* _LINUX_IRQ_H */
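A minimal sketch of a caller of the new mask helpers (function names and error handling are illustrative, not part of this patch):

    static bool example_init_desc(struct irq_desc *desc, int cpu, bool boot)
    {
            if (!init_alloc_desc_masks(desc, cpu, boot))
                    return false;   /* affinity/pending_mask allocation failed */

            /* ... remaining irq_desc initialization ... */
            return true;
    }

    static void example_move_desc(struct irq_desc *old_desc, struct irq_desc *new_desc)
    {
            /* only does real work when the cpumasks are allocated off-stack */
            init_copy_desc_masks(old_desc, new_desc);
    }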
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 86af92e9e84..887477bc2ab 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
# define for_each_irq_desc_reverse(irq, desc) \
for (irq = nr_irqs - 1; irq >= 0; irq--)
+
#else /* CONFIG_GENERIC_HARDIRQS */
extern int nr_irqs;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b45109c61fb..b28b37eb11c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh);
int val = (expr); \
if (!val) { \
printk(KERN_ERR \
- "EXT3-fs unexpected failure: %s;\n",# expr); \
+ "JBD2 unexpected failure: %s: %s;\n", \
+ __func__, #expr); \
printk(KERN_ERR why "\n"); \
} \
val; \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 343df9ef241..7fa371898e3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -480,7 +480,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
/*
* swap - swap value of @a and @b
*/
-#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/**
* container_of - cast a member of a structure out to the containing structure
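The switch from a statement expression to do { } while (0) keeps swap() safe as the body of an if/else without yielding a value; a tiny illustrative use:

    static void sort2(int *x, int *y)
    {
            if (*x > *y)
                    swap(*x, *y);   /* expands to a single statement; no dangling value */
    }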
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bca3ba25f52..5d87bc09a1f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -380,6 +380,7 @@ enum {
ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
not multiple of 16 bytes */
ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */
+ ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -580,7 +581,7 @@ struct ata_device {
acpi_handle acpi_handle;
union acpi_object *gtf_cache;
#endif
- /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
+ /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
u64 n_sectors; /* size of device, if ATA */
unsigned int class; /* ATA_DEV_xxx */
unsigned long unpark_deadline;
@@ -605,20 +606,22 @@ struct ata_device {
u16 heads; /* Number of heads */
u16 sectors; /* Number of sectors per track */
- /* error history */
- int spdn_cnt;
- struct ata_ering ering;
-
union {
u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
};
+
+ /* error history */
+ int spdn_cnt;
+ /* ering is CLEAR_END, read comment above CLEAR_END */
+ struct ata_ering ering;
};
-/* Offset into struct ata_device. Fields above it are maintained
- * acress device init. Fields below are zeroed.
+/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
+ * cleared to zero on ata_dev_init().
*/
-#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
+#define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors)
+#define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering)
struct ata_eh_info {
struct ata_device *dev; /* offending device */
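The BEGIN/END pair lets device init zero only the fields between n_sectors and ering, preserving the error history; a sketch of the kind of clearing these bounds enable (illustrative function, the real clearing happens in ata_dev_init() in libata core):

    #include <linux/string.h>

    static void example_clear_dev(struct ata_device *dev)
    {
            /* zero from n_sectors up to, but not including, ering */
            memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
                   ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
    }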
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba85..5b4e28bcb78 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -49,4 +49,5 @@
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA
+#define STACK_END_MAGIC 0x57AC6E9D
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e8ddc98b840..7dc04ff5ab8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1129,8 +1129,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
- unsigned int vm_flags, unsigned long pgoff,
- int accountable);
+ unsigned int vm_flags, unsigned long pgoff);
static inline unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
@@ -1305,5 +1304,6 @@ void vmemmap_populate_print_last(void);
extern void *alloc_locked_buffer(size_t size);
extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 4f7ea12463d..145a75528cc 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);
#endif
-struct module_ref
-{
- local_t count;
-} ____cacheline_aligned;
-
enum module_state
{
MODULE_STATE_LIVE,
@@ -344,8 +339,11 @@ struct module
/* Destruction function. */
void (*exit)(void);
- /* Reference counts */
- struct module_ref ref[NR_CPUS];
+#ifdef CONFIG_SMP
+ char *refptr;
+#else
+ local_t ref;
+#endif
#endif
};
#ifndef MODULE_ARCH_INIT
@@ -395,13 +393,21 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
+static inline local_t *__module_ref_addr(struct module *mod, int cpu)
+{
+#ifdef CONFIG_SMP
+ return (local_t *) (mod->refptr + per_cpu_offset(cpu));
+#else
+ return &mod->ref;
+#endif
+}
+
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
- BUG_ON(module_refcount(module) == 0);
- local_inc(&module->ref[get_cpu()].count);
+ local_inc(__module_ref_addr(module, get_cpu()));
put_cpu();
}
}
@@ -413,7 +419,7 @@ static inline int try_module_get(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
if (likely(module_is_live(module)))
- local_inc(&module->ref[cpu].count);
+ local_inc(__module_ref_addr(module, cpu));
else
ret = 0;
put_cpu();
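With the reference counter reached through __module_ref_addr(), a total count is obtained by summing the per-cpu values; a hedged sketch of what module_refcount() is expected to look like after this change (the real definition lives in kernel/module.c):

    static unsigned int example_module_refcount(struct module *mod)
    {
            unsigned int total = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    total += local_read(__module_ref_addr(mod, cpu));

            return total;
    }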
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 48890cf3f96..7bd624bfdcf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -684,7 +684,7 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-size_t pci_get_rom_size(void __iomem *rom, size_t size);
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index febc10ed385..52a9fe08451 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2425,6 +2425,7 @@
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
+#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873..3577ffd90d4 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,35 +8,46 @@
#include <asm/percpu.h>
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
+#define PER_CPU_FIRST_SECTION ".first"
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(SHARED_ALIGNED_SECTION))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
- ____cacheline_aligned_in_smp
+#else
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.page_aligned"))) \
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
#define DEFINE_PER_CPU(type, name) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+ DEFINE_PER_CPU_SECTION(type, name, "")
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
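A short sketch of the new definition macros in use (all variable and struct names are illustrative):

    /* placed in .data.percpu.first, i.e. at the very start of the per-cpu area */
    DEFINE_PER_CPU_FIRST(unsigned long, example_first_word);

    /* placed in .data.percpu.page_aligned */
    struct example_stack { char buf[4096]; };
    DEFINE_PER_CPU_PAGE_ALIGNED(struct example_stack, example_stack);

    /* cacheline-aligned on SMP kernels, a plain per-cpu variable otherwise */
    DEFINE_PER_CPU_SHARED_ALIGNED(long, example_hot_counter);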
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a7c7638873..f0a50b20e8a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -453,23 +453,33 @@ struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
- spinlock_t lock;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime
+#define INIT_CPUTIME \
+ (struct task_cputime) { \
+ .utime = cputime_zero, \
+ .stime = cputime_zero, \
+ .sum_exec_runtime = 0, \
+ }
+
/**
- * struct thread_group_cputime - thread group interval timer counts
- * @totals: thread group interval timers; substructure for
- * uniprocessor kernel, per-cpu for SMP kernel.
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime: thread group interval timers.
+ * @running: non-zero when there are timers running and
+ * @cputime receives updates.
+ * @lock: lock for fields in this struct.
*
* This structure contains the version of task_cputime, above, that is
- * used for thread group CPU clock calculations.
+ * used for thread group CPU timer calculations.
*/
-struct thread_group_cputime {
- struct task_cputime totals;
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ spinlock_t lock;
};
/*
@@ -518,10 +528,10 @@ struct signal_struct {
cputime_t it_prof_incr, it_virt_incr;
/*
- * Thread group totals for process CPU clocks.
- * See thread_group_cputime(), et al, for details.
+ * Thread group totals for process CPU timers.
+ * See thread_group_cputimer(), et al, for details.
*/
- struct thread_group_cputime cputime;
+ struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
@@ -558,7 +568,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
- cputime_t cutime, cstime;
+ cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -567,6 +577,14 @@ struct signal_struct {
struct task_io_accounting ioac;
/*
+ * Cumulative ns of scheduled CPU time for dead threads in the
+ * group, not including a zombie group leader. (This only differs
+ * from jiffies_to_ns(utime + stime) if sched_clock uses something
+ * other than jiffies.)
+ */
+ unsigned long long sum_sched_runtime;
+
+ /*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
@@ -1160,10 +1178,9 @@ struct task_struct {
pid_t pid;
pid_t tgid;
-#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
-#endif
+
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
@@ -2069,6 +2086,19 @@ static inline int object_is_on_stack(void *obj)
extern void thread_info_cache_init(void);
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+ n++;
+ } while (!*n);
+
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
@@ -2182,27 +2212,14 @@ static inline int spin_needbreak(spinlock_t *lock)
/*
* Thread group CPU time accounting.
*/
-
-static inline
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
- struct task_cputime *totals = &tsk->signal->cputime.totals;
- unsigned long flags;
-
- spin_lock_irqsave(&totals->lock, flags);
- *times = *totals;
- spin_unlock_irqrestore(&totals->lock, flags);
-}
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- sig->cputime.totals = (struct task_cputime){
- .utime = cputime_zero,
- .stime = cputime_zero,
- .sum_exec_runtime = 0,
- };
-
- spin_lock_init(&sig->cputime.totals.lock);
+ sig->cputimer.cputime = INIT_CPUTIME;
+ spin_lock_init(&sig->cputimer.lock);
+ sig->cputimer.running = 0;
}
static inline void thread_group_cputime_free(struct signal_struct *sig)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 39c3a5eb8eb..6ca6a7b66d7 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -43,10 +43,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
- {
- extern void __you_cannot_kmalloc_that_much(void);
- __you_cannot_kmalloc_that_much();
- }
+ return NULL;
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
@@ -77,10 +74,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
- {
- extern void __you_cannot_kmalloc_that_much(void);
- __you_cannot_kmalloc_that_much();
- }
+ return NULL;
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d6..bbacb7baa44 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
#define put_cpu() preempt_enable()
#define put_cpu_no_resched() preempt_enable_no_resched()
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
void smp_setup_processor_id(void);
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e0c0fccced4..a0c66a2e00a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -124,7 +124,12 @@ do { \
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
+
+#ifdef __raw_spin_is_contended
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock) (((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
#endif
/**
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 00000000000..6f3e54c704c
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
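With the generic stub in place, early boot code can call the canary setup unconditionally; a hedged sketch of the expected call site (start_kernel() is the intended user, shown here under an illustrative name):

    #include <linux/stackprotector.h>

    asmlinkage void __init example_start_kernel(void)
    {
            /*
             * Initialize the boot CPU's stack canary as early as possible;
             * compiles to nothing when CONFIG_CC_STACKPROTECTOR is off.
             */
            boot_init_stack_canary();

            /* ... rest of early initialization ... */
    }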
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0eda02ff241..f9f900cfd06 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -95,13 +95,13 @@ struct old_linux_dirent;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
-#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
-#define SYSCALL_DEFINE1(...) SYSCALL_DEFINEx(1, __VA_ARGS__)
-#define SYSCALL_DEFINE2(...) SYSCALL_DEFINEx(2, __VA_ARGS__)
-#define SYSCALL_DEFINE3(...) SYSCALL_DEFINEx(3, __VA_ARGS__)
-#define SYSCALL_DEFINE4(...) SYSCALL_DEFINEx(4, __VA_ARGS__)
-#define SYSCALL_DEFINE5(...) SYSCALL_DEFINEx(5, __VA_ARGS__)
-#define SYSCALL_DEFINE6(...) SYSCALL_DEFINEx(6, __VA_ARGS__)
+#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
+#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
#ifdef CONFIG_PPC64
#define SYSCALL_ALIAS(alias, name) \
@@ -121,21 +121,21 @@ struct old_linux_dirent;
#define SYSCALL_DEFINE(name) static inline long SYSC_##name
#define SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__)); \
- static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__)); \
- asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__)) \
+ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
+ static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
+ asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
{ \
__SC_TEST##x(__VA_ARGS__); \
- return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__)); \
+ return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__)); \
} \
- SYSCALL_ALIAS(sys_##name, SyS_##name); \
- static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__))
+ SYSCALL_ALIAS(sys##name, SyS##name); \
+ static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__))
#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
#define SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__))
+ asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
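The underscore is now folded into the name argument, so a single token paste yields the sys_*, SyS_* and SYSC_* identifiers from one parameter; an illustrative definition (the syscall itself is made up):

    #include <linux/syscalls.h>

    /* hypothetical syscall, for illustration only */
    SYSCALL_DEFINE3(example, int, fd, char __user *, buf, size_t, count)
    {
            /* the body sees correctly typed parameters */
            return 0;
    }

With CONFIG_HAVE_SYSCALL_WRAPPERS this roughly expands to a prototype for sys_example(), a SyS_example() stub that takes long-sized arguments and casts them before forwarding, a SYSCALL_ALIAS() binding sys_example to SyS_example, and the inline SYSC_example() that holds the body; without wrappers it is simply sys_example() itself.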
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e632d29f054..a16b9e06f2e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_siblings
#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
#endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu) cpumask_of(cpu)
+#endif
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ef609f842fa..a210ede73b5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -132,6 +132,8 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
list_del(&old->task_list);
}
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+ int nr_exclusive, int sync, void *key);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -333,16 +335,19 @@ do { \
for (;;) { \
prepare_to_wait_exclusive(&wq, &__wait, \
TASK_INTERRUPTIBLE); \
- if (condition) \
+ if (condition) { \
+ finish_wait(&wq, &__wait); \
break; \
+ } \
if (!signal_pending(current)) { \
schedule(); \
continue; \
} \
ret = -ERESTARTSYS; \
+ abort_exclusive_wait(&wq, &__wait, \
+ TASK_INTERRUPTIBLE, NULL); \
break; \
} \
- finish_wait(&wq, &__wait); \
} while (0)
#define wait_event_interruptible_exclusive(wq, condition) \
@@ -431,6 +436,8 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+ unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 9bf4ccc93db..55e41afd95e 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -94,16 +94,16 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
/* Call the specified callback for all subdevs matching grp_id (if 0, then
match them all). Ignore any errors. Note that you cannot add or delete
a subdev while walking the subdevs list. */
-#define v4l2_device_call_all(dev, grp_id, o, f, args...) \
+#define v4l2_device_call_all(dev, grpid, o, f, args...) \
__v4l2_device_call_subdevs(dev, \
- !(grp_id) || sd->grp_id == (grp_id), o, f , ##args)
+ !(grpid) || sd->grp_id == (grpid), o, f , ##args)
/* Call the specified callback for all subdevs matching grp_id (if 0, then
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. Note that you cannot
add or delete a subdev while walking the subdevs list. */
-#define v4l2_device_call_until_err(dev, grp_id, o, f, args...) \
+#define v4l2_device_call_until_err(dev, grpid, o, f, args...) \
__v4l2_device_call_subdevs_until_err(dev, \
- !(grp_id) || sd->grp_id == (grp_id), o, f , ##args)
+ !(grpid) || sd->grp_id == (grpid), o, f , ##args)
#endif
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index f44bb5c77a7..d0a043153cc 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -182,7 +182,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
- if (sizeof(rwlock_t) != 0) {
+ if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
diff --git a/include/video/aty128.h b/include/video/aty128.h
index 7079beb005e..51ac69f05bd 100644
--- a/include/video/aty128.h
+++ b/include/video/aty128.h
@@ -21,9 +21,9 @@
#define I2C_CNTL_1 0x0094
#define PALETTE_INDEX 0x00b0
#define PALETTE_DATA 0x00b4
-#define CONFIG_CNTL 0x00e0
+#define CNFG_CNTL 0x00e0
#define GEN_RESET_CNTL 0x00f0
-#define CONFIG_MEMSIZE 0x00f8
+#define CNFG_MEMSIZE 0x00f8
#define MEM_CNTL 0x0140
#define MEM_POWER_MISC 0x015c
#define AGP_BASE 0x0170
diff --git a/include/video/mach64.h b/include/video/mach64.h
index a8332e528ec..89e91c0cb73 100644
--- a/include/video/mach64.h
+++ b/include/video/mach64.h
@@ -103,7 +103,7 @@
#define CUR_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */
#define CUR2_HORZ_VERT_OFF 0x0070 /* Dword offset 0_1C */
-#define CONFIG_PANEL_LG 0x0074 /* Dword offset 0_1D (LG) */
+#define CNFG_PANEL_LG 0x0074 /* Dword offset 0_1D (LG) */
/* General I/O Control */
#define GP_IO 0x0078 /* Dword offset 0_1E */
@@ -146,8 +146,8 @@
#define CLOCK_SEL_CNTL 0x0090 /* Dword offset 0_24 */
/* Configuration */
-#define CONFIG_STAT1 0x0094 /* Dword offset 0_25 */
-#define CONFIG_STAT2 0x0098 /* Dword offset 0_26 */
+#define CNFG_STAT1 0x0094 /* Dword offset 0_25 */
+#define CNFG_STAT2 0x0098 /* Dword offset 0_26 */
/* Bus Control */
#define BUS_CNTL 0x00A0 /* Dword offset 0_28 */
@@ -190,9 +190,9 @@
#define POWER_MANAGEMENT_LG 0x00D8 /* Dword offset 0_36 (LG) */
/* Configuration */
-#define CONFIG_CNTL 0x00DC /* Dword offset 0_37 (CT, ET, VT) */
-#define CONFIG_CHIP_ID 0x00E0 /* Dword offset 0_38 */
-#define CONFIG_STAT0 0x00E4 /* Dword offset 0_39 */
+#define CNFG_CNTL 0x00DC /* Dword offset 0_37 (CT, ET, VT) */
+#define CNFG_CHIP_ID 0x00E0 /* Dword offset 0_38 */
+#define CNFG_STAT0 0x00E4 /* Dword offset 0_39 */
/* Test and Debug */
#define CRC_SIG 0x00E8 /* Dword offset 0_3A */
@@ -851,17 +851,17 @@
#define PLL_YCLK_CNTL 0x29
#define PM_DYN_CLK_CNTL 0x2A
-/* CONFIG_CNTL register constants */
+/* CNFG_CNTL register constants */
#define APERTURE_4M_ENABLE 1
#define APERTURE_8M_ENABLE 2
#define VGA_APERTURE_ENABLE 4
-/* CONFIG_STAT0 register constants (GX, CX) */
+/* CNFG_STAT0 register constants (GX, CX) */
#define CFG_BUS_TYPE 0x00000007
#define CFG_MEM_TYPE 0x00000038
#define CFG_INIT_DAC_TYPE 0x00000e00
-/* CONFIG_STAT0 register constants (CT, ET, VT) */
+/* CNFG_STAT0 register constants (CT, ET, VT) */
#define CFG_MEM_TYPE_xT 0x00000007
#define ISA 0
@@ -942,7 +942,7 @@
#define PCI_ATI_VENDOR_ID 0x1002
-/* CONFIG_CHIP_ID register constants */
+/* CNFG_CHIP_ID register constants */
#define CFG_CHIP_TYPE 0x0000FFFF
#define CFG_CHIP_CLASS 0x00FF0000
#define CFG_CHIP_REV 0xFF000000
@@ -951,7 +951,7 @@
#define CFG_CHIP_MINOR 0xC0000000
-/* Chip IDs read from CONFIG_CHIP_ID */
+/* Chip IDs read from CNFG_CHIP_ID */
/* mach64GX family */
#define GX_CHIP_ID 0xD7 /* mach64GX (ATI888GX00) */
@@ -1254,7 +1254,7 @@
#define CRTC2_DISPLAY_DIS 0x00000400
/* LCD register indices */
-#define CONFIG_PANEL 0x00
+#define CNFG_PANEL 0x00
#define LCD_GEN_CNTL 0x01
#define DSTN_CONTROL 0x02
#define HFB_PITCH_ADDR 0x03
diff --git a/include/video/radeon.h b/include/video/radeon.h
index 1cd09cc5b16..e072b16b39a 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -11,13 +11,13 @@
#define HI_STAT 0x004C
#define BUS_CNTL1 0x0034
#define I2C_CNTL_1 0x0094
-#define CONFIG_CNTL 0x00E0
-#define CONFIG_MEMSIZE 0x00F8
-#define CONFIG_APER_0_BASE 0x0100
-#define CONFIG_APER_1_BASE 0x0104
-#define CONFIG_APER_SIZE 0x0108
-#define CONFIG_REG_1_BASE 0x010C
-#define CONFIG_REG_APER_SIZE 0x0110
+#define CNFG_CNTL 0x00E0
+#define CNFG_MEMSIZE 0x00F8
+#define CNFG_APER_0_BASE 0x0100
+#define CNFG_APER_1_BASE 0x0104
+#define CNFG_APER_SIZE 0x0108
+#define CNFG_REG_1_BASE 0x010C
+#define CNFG_REG_APER_SIZE 0x0110
#define PAD_AGPINPUT_DELAY 0x0164
#define PAD_CTLR_STRENGTH 0x0168
#define PAD_CTLR_UPDATE 0x016C
@@ -509,7 +509,7 @@
/* CLOCK_CNTL_INDEX bit constants */
#define PLL_WR_EN 0x00000080
-/* CONFIG_CNTL bit constants */
+/* CNFG_CNTL bit constants */
#define CFG_VGA_RAM_EN 0x00000100
#define CFG_ATI_REV_ID_MASK (0xf << 16)
#define CFG_ATI_REV_A11 (0 << 16)
@@ -980,7 +980,7 @@
/* masks */
-#define CONFIG_MEMSIZE_MASK 0x1f000000
+#define CNFG_MEMSIZE_MASK 0x1f000000
#define MEM_CFG_TYPE 0x40000000
#define DST_OFFSET_MASK 0x003fffff
#define DST_PITCH_MASK 0x3fc00000