Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/Kbuild              |   1
-rw-r--r-- | include/linux/agp_backend.h       |   5
-rw-r--r-- | include/linux/bitmap.h            |   1
-rw-r--r-- | include/linux/bootmem.h           |   4
-rw-r--r-- | include/linux/byteorder.h         | 372
-rw-r--r-- | include/linux/firmware-map.h      |  26
-rw-r--r-- | include/linux/i2c-id.h            |   2
-rw-r--r-- | include/linux/init.h              |   1
-rw-r--r-- | include/linux/kernel.h            |  14
-rw-r--r-- | include/linux/lockdep.h           |  70
-rw-r--r-- | include/linux/mm.h                |  20
-rw-r--r-- | include/linux/rcuclassic.h        |   2
-rw-r--r-- | include/linux/sched.h             |  31
-rw-r--r-- | include/linux/seq_file.h          |  12
-rw-r--r-- | include/linux/spinlock.h          |   6
-rw-r--r-- | include/linux/spinlock_api_smp.h  |   2
-rw-r--r-- | include/linux/swab.h              | 309
17 files changed, 774 insertions, 104 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index a26f565e818..327f60658d9 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -356,6 +356,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include <linux/list.h> + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 652470b687c..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include <linux/types.h> +#include <linux/swab.h> + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && 
!defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out over time as the base version + * handles constant folding. + */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); 
+#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define 
cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. + */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. 
- */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ diff --git a/include/linux/init.h b/include/linux/init.h index 11b84e10605..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index aaa998f65c7..2651f805ba6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -108,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -118,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to. */ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -189,6 +190,14 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + struct held_lock { /* * One-way hash of the dependency chain up to this point. 
We @@ -205,14 +214,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -226,11 +235,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* @@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -313,8 +326,9 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct 
task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# else +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 335288bff1b..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST /* * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm (force=0 and doesn't return any vmas). @@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); -#else -/* - * Should probably be moved to asm-generic, and architectures can include it if - * they don't implement their own get_user_pages_fast. - */ -#define get_user_pages_fast(start, nr_pages, write, pages) \ -({ \ - struct mm_struct *mm = current->mm; \ - int ret; \ - \ - down_read(&mm->mmap_sem); \ - ret = get_user_pages(current, mm, start, nr_pages, \ - write, 0, pages, NULL); \ - up_read(&mm->mmap_sem); \ - \ - ret; \ -}) -#endif - /* * A callback you can register to apply pressure to ageable caches. 
* diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..4ab84362272 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/include/linux/sched.h b/include/linux/sched.h index 5270d449ff9..5850bfb968a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1572,28 +1566,11 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); +#else extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61e5610ad16..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -183,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, 
subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/byteorder.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. + */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef 
__arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. 
+ */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ |
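
Most of the insertions above come from the two new shared headers, linux/byteorder.h and linux/swab.h, which gather the endianness-conversion and byte-swapping helpers into one place. A minimal sketch of how driver code typically uses these interfaces is shown below; the struct demo_desc layout and the demo_* function names are hypothetical and exist only to illustrate the cpu_to_le*/le*_to_cpu conversions and the le16_add_cpu() helper declared in this diff.

#include <linux/types.h>
#include <asm/byteorder.h>	/* provides cpu_to_le32(), le32_to_cpu(), le16_add_cpu(), ... */

/* Hypothetical descriptor whose fields are stored little-endian in memory. */
struct demo_desc {
	__le32	addr;
	__le16	len;
	__le16	flags;
};

static void demo_fill_desc(struct demo_desc *d, u32 addr, u16 len)
{
	/* Convert CPU-order values once, at the point where they are stored. */
	d->addr  = cpu_to_le32(addr);
	d->len   = cpu_to_le16(len);
	d->flags = cpu_to_le16(0);
}

static u32 demo_read_addr(const struct demo_desc *d)
{
	/* Convert back when reading; a no-op cast on little-endian CPUs. */
	return le32_to_cpu(d->addr);
}

static void demo_grow_desc(struct demo_desc *d, u16 extra)
{
	/* le16_add_cpu() folds the open-coded read/convert/add/convert/store. */
	le16_add_cpu(&d->len, extra);
}

On big-endian machines the same source compiles down to the swab16()/swab32() operations defined in linux/swab.h, which is why the two headers are introduced together.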