Diffstat (limited to 'include/linux')
96 files changed, 2998 insertions, 832 deletions
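Among the changes below are several small helpers: resource_size() in ioport.h, list_cut_position() and list_splice_tail() in list.h, trylock_buffer() in buffer_head.h, and try_wait_for_completion() in completion.h. As a minimal illustrative sketch (not part of the patch itself), this is how a driver might use the new resource_size() helper instead of open-coding "end - start + 1"; the function name and driver context here are hypothetical:

/*
 * Illustrative sketch only: map a device's first MMIO window using the
 * resource_size() helper added to <linux/ioport.h> in this series.
 * The helper evaluates to res->end - res->start + 1.
 */
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *example_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	/* Fetch the first memory resource attached to the platform device. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	/* resource_size(res) replaces the open-coded res->end - res->start + 1. */
	return ioremap(res->start, resource_size(res));
}

The sketch assumes an ordinary platform driver probe path; the only new API it relies on is resource_size(), whose definition appears in the ioport.h hunk below.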
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 4c4142c5aa6..327f60658d9 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -97,6 +97,7 @@ header-y += ioctl.h header-y += ip6_tunnel.h header-y += ipmi_msgdefs.h header-y += ipsec.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += iso_fs.h @@ -355,6 +356,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include <linux/list.h> + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 88d68081a0f..e61f22be4d0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -655,6 +655,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_plug_device(struct request_queue *); +extern void blk_plug_device_unlocked(struct request_queue *); extern int blk_remove_plug(struct request_queue *); extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct request_queue *, diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 652470b687c..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -97,10 +97,14 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + 
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 82aa36c53ea..eadaab44015 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) -TAS_BUFFER_FNS(Lock, locked) BUFFER_FNS(Req, req) TAS_BUFFER_FNS(Req, req) BUFFER_FNS(Mapped, mapped) @@ -205,6 +204,8 @@ void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, + unsigned long from); int block_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); @@ -319,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) __wait_on_buffer(bh); } +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); +} + static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (test_set_buffer_locked(bh)) + if (!trylock_buffer(bh)) __lock_buffer(bh); } diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include <linux/types.h> +#include <linux/swab.h> + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) 
__swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out over time as the base version + * handles constant folding. + */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 
*)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index d2961b66d53..57faa60de9b 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -55,4 +55,49 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) + +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +static inline bool try_wait_for_completion(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irq(&x->wait.lock); + return ret; +} + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +static inline bool completion_done(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + spin_unlock_irq(&x->wait.lock); + return ret; +} + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d62c19ff041..7f627775c94 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -40,6 +40,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/err.h> #include <asm/atomic.h> @@ -129,8 +130,25 @@ struct configfs_attribute { /* * Users often need to create attribute structures for their configurable * attributes, containing a configfs_attribute member and function pointers - * for the show() and store() operations on that attribute. They can use - * this macro (similar to sysfs' __ATTR) to make defining attributes easier. + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it The argument _item is the name of the + * config_item structure. 
+ */ +#define CONFIGFS_ATTR_STRUCT(_item) \ +struct _item##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. + * An example: + * #define MYITEM_ATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_ATTR(_name, _mode, _show, _store) */ #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ { \ @@ -142,6 +160,52 @@ struct configfs_attribute { .show = _show, \ .store = _store, \ } +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_ATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} + +/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _item is the name of the config_item structure. + * This macro expects the attributes to be named "struct <name>_attribute" + * and the function to_<name>() to exist; + */ +#define CONFIGFS_ATTR_OPS(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} \ +static ssize_t _item##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_item##_attr->store) \ + ret = _item##_attr->store(_item, page, count); \ + return ret; \ +} /* * If allow_link() exists, the item can symlink(2) out to other diff --git a/include/linux/connector.h b/include/linux/connector.h index 96a89d3d672..5c7f9468f75 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -38,8 +38,9 @@ #define CN_W1_VAL 0x1 #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 +#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ -#define CN_NETLINK_USERS 5 +#define CN_NETLINK_USERS 6 /* * Maximum connector's message size. 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2270ca5ec63..6fd5668aa57 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -106,6 +106,7 @@ struct cpufreq_policy { #define CPUFREQ_ADJUST (0) #define CPUFREQ_INCOMPATIBLE (1) #define CPUFREQ_NOTIFY (2) +#define CPUFREQ_START (3) #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1b5c98e7fef..d3219d73f8e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -62,15 +62,7 @@ * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set - *ifdef CONFIG_HAS_CPUMASK_OF_CPU - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v - * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu] - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *else - * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v - * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu) - * cpumask_of_cpu_ptr(v, cpu) Combines above two operations - *endif + * (can be used as an lvalue) * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask @@ -273,37 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. + */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return (const cpumask_t *)p; +} + +/* + * In cases where we take the address of the cpumask immediately, + * gcc optimizes it out (it's a constant) and there's no huge stack + * variable created: + */ +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) -#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) -#define cpumask_of_cpu_ptr(v, cpu) \ - const cpumask_t *v = &cpumask_of_cpu(cpu) -#define cpumask_of_cpu_ptr_declare(v) \ - const cpumask_t *v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - v = &cpumask_of_cpu(cpu) -#else -#define cpumask_of_cpu(cpu) \ -({ \ - typeof(_unused_cpumask_arg_) m; \ - if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(cpu); \ - } else { \ - cpus_clear(m); \ - cpu_set((cpu), m); \ - } \ - m; \ -}) -#define cpumask_of_cpu_ptr(v, cpu) \ - cpumask_t _##v = cpumask_of_cpu(cpu); \ - const cpumask_t *v = &_##v -#define cpumask_of_cpu_ptr_declare(v) \ - cpumask_t _##v; \ - const cpumask_t *v = &_##v -#define cpumask_of_cpu_ptr_next(v, cpu) \ - _##v = cpumask_of_cpu(cpu) -#endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000000..b69222cc1fd --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,50 @@ +/* Credentials management + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#define get_current_user() (get_uid(current->user)) + +#define task_uid(task) ((task)->uid) +#define task_gid(task) ((task)->gid) +#define task_euid(task) ((task)->euid) +#define task_egid(task) ((task)->egid) + +#define current_uid() (current->uid) +#define current_gid() (current->gid) +#define current_euid() (current->euid) +#define current_egid() (current->egid) +#define current_suid() (current->suid) +#define current_sgid() (current->sgid) +#define current_fsuid() (current->fsuid) +#define current_fsgid() (current->fsgid) +#define current_cap() (current->cap_effective) + +#define current_uid_gid(_uid, _gid) \ +do { \ + *(_uid) = current->uid; \ + *(_gid) = current->gid; \ +} while(0) + +#define current_euid_egid(_uid, _gid) \ +do { \ + *(_uid) = current->euid; \ + *(_gid) = current->egid; \ +} while(0) + +#define current_fsuid_fsgid(_uid, _gid) \ +do { \ + *(_uid) = current->fsuid; \ + *(_gid) = current->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98202c672fd..07aa198f19e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -230,6 +230,7 @@ extern void d_delete(struct dentry *); extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct inode *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct inode *, struct dentry *, struct qstr *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446b642..c30879cf93b 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -27,6 +27,7 @@ struct dm9000_plat_data { unsigned int flags; + unsigned char dev_addr[6]; /* allow replacement IO routines */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 8bb5e87df36..b4b038b89ee 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -27,9 +27,24 @@ struct ethtool_cmd { __u8 autoneg; /* Enable or disable autonegotiation */ __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ - __u32 reserved[4]; + __u16 speed_hi; + __u16 reserved2; + __u32 reserved[3]; }; +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + #define ETHTOOL_BUSINFO_LEN 32 /* these strings are set to whatever the driver author decides... 
*/ struct ethtool_drvinfo { diff --git a/include/linux/file.h b/include/linux/file.h index 27c64bdc68c..a20259e248a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -34,8 +34,9 @@ extern struct file *fget(unsigned int fd); extern struct file *fget_light(unsigned int fd, int *fput_needed); extern void set_close_on_exec(unsigned int fd, int flag); extern void put_filp(struct file *); +extern int alloc_fd(unsigned start, unsigned flags); extern int get_unused_fd(void); -extern int get_unused_fd_flags(int flags); +#define get_unused_fd_flags(flags) alloc_fd(0, (flags)) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8252b045e62..580b513668f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -443,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i) return i->count; } +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -484,6 +505,8 @@ struct address_space_operations { int (*migratepage) (struct address_space *, struct page *, struct page *); int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); }; /* @@ -1198,27 +1221,6 @@ struct block_device_operations { struct module *owner; }; -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. 
- */ -typedef struct { - size_t written; - size_t count; - union { - char __user * buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. */ diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h deleted file mode 100644 index efef11db790..00000000000 --- a/include/linux/harrier_defs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/linux/harrier_defs.h - * - * Definitions for Motorola MCG Harrier North Bridge & Memory controller - * - * Author: Dale Farnsworth - * dale.farnsworth@mvista.com - * - * Extracted from asm-ppc/harrier.h by: - * Randy Vinson - * rvinson@mvista.com - * - * Copyright 2001-2002 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __ASMPPC_HARRIER_DEFS_H -#define __ASMPPC_HARRIER_DEFS_H - -#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 - -#define HARRIER_VEND_DEV_ID 0x1057480b - -#define HARRIER_VENI_OFF 0x00 - -#define HARRIER_REVI_OFF 0x05 -#define HARRIER_UCTL_OFF 0xd0 -#define HARRIER_XTAL64_MASK 0x02 - -#define HARRIER_MISC_CSR_OFF 0x1c -#define HARRIER_RSTOUT 0x01000000 -#define HARRIER_SYSCON 0x08000000 -#define HARRIER_EREADY 0x10000000 -#define HARRIER_ERDYS 0x20000000 - -/* Function exception registers */ -#define HARRIER_FEEN_OFF 0x40 /* enable */ -#define HARRIER_FEST_OFF 0x44 /* status */ -#define HARRIER_FEMA_OFF 0x48 /* mask */ -#define HARRIER_FECL_OFF 0x4c /* clear */ - -#define HARRIER_FE_DMA 0x80 -#define HARRIER_FE_MIDB 0x40 -#define HARRIER_FE_MIM0 0x20 -#define HARRIER_FE_MIM1 0x10 -#define HARRIER_FE_MIP 0x08 -#define HARRIER_FE_UA0 0x04 -#define HARRIER_FE_UA1 0x02 -#define HARRIER_FE_ABT 0x01 - -#define HARRIER_SERIAL_0_OFF 0xc0 - -#define HARRIER_MBAR_OFF 0xe0 -#define HARRIER_MBAR_MSK 0xfffc0000 -#define HARRIER_MPIC_CSR_OFF 0xe4 -#define HARRIER_MPIC_OPI_ENABLE 0x40 -#define HARRIER_MPIC_IFEVP_OFF 0x10200 -#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff -#define HARRIER_MPIC_IFEDE_OFF 0x10210 - -/* - * Define the Memory Controller register offsets. - */ -#define HARRIER_SDBA_OFF 0x110 -#define HARRIER_SDBB_OFF 0x114 -#define HARRIER_SDBC_OFF 0x118 -#define HARRIER_SDBD_OFF 0x11c -#define HARRIER_SDBE_OFF 0x120 -#define HARRIER_SDBF_OFF 0x124 -#define HARRIER_SDBG_OFF 0x128 -#define HARRIER_SDBH_OFF 0x12c - -#define HARRIER_SDB_ENABLE 0x00000100 -#define HARRIER_SDB_SIZE_MASK 0xf -#define HARRIER_SDB_SIZE_SHIFT 16 -#define HARRIER_SDB_BASE_MASK 0xff -#define HARRIER_SDB_BASE_SHIFT 24 - -/* - * Define outbound register offsets. 
- */ -#define HARRIER_OTAD0_OFF 0x220 -#define HARRIER_OTOF0_OFF 0x224 -#define HARRIER_OTAD1_OFF 0x228 -#define HARRIER_OTOF1_OFF 0x22c -#define HARRIER_OTAD2_OFF 0x230 -#define HARRIER_OTOF2_OFF 0x234 -#define HARRIER_OTAD3_OFF 0x238 -#define HARRIER_OTOF3_OFF 0x23c - -#define HARRIER_OTADX_START_MSK 0xffff0000UL -#define HARRIER_OTADX_END_MSK 0x0000ffffUL - -#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL -#define HARRIER_OTOFX_ENA 0x80UL -#define HARRIER_OTOFX_WPE 0x10UL -#define HARRIER_OTOFX_SGE 0x08UL -#define HARRIER_OTOFX_RAE 0x04UL -#define HARRIER_OTOFX_MEM 0x02UL -#define HARRIER_OTOFX_IOM 0x01UL - -/* - * Define generic message passing register offsets - */ -/* Mirrored registers (visible from both PowerPC and PCI space) */ -#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ -#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ -#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ -#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ -#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ - -#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ -#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ -#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ - -/* PowerPC-only registers */ -#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ - -/* PCI-only registers */ -#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ -#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ -#define HARRIER_MG_OMI0 (1<<4) -#define HARRIER_MG_OMI1 (1<<5) - -#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_XCSR_TO_PCFS_OFF 0x300 - -/* - * Define message passing attribute register offset - */ -#define HARRIER_MPAT_OFF 0x44 - -/* - * Define inbound attribute register offsets. 
- */ -#define HARRIER_ITSZ0_OFF 0x48 -#define HARRIER_ITAT0_OFF 0x4c - -#define HARRIER_ITSZ1_OFF 0x50 -#define HARRIER_ITAT1_OFF 0x54 - -#define HARRIER_ITSZ2_OFF 0x58 -#define HARRIER_ITAT2_OFF 0x5c - -#define HARRIER_ITSZ3_OFF 0x60 -#define HARRIER_ITAT3_OFF 0x64 - -/* inbound translation size constants */ -#define HARRIER_ITSZ_MSK 0xff -#define HARRIER_ITSZ_4KB 0x00 -#define HARRIER_ITSZ_8KB 0x01 -#define HARRIER_ITSZ_16KB 0x02 -#define HARRIER_ITSZ_32KB 0x03 -#define HARRIER_ITSZ_64KB 0x04 -#define HARRIER_ITSZ_128KB 0x05 -#define HARRIER_ITSZ_256KB 0x06 -#define HARRIER_ITSZ_512KB 0x07 -#define HARRIER_ITSZ_1MB 0x08 -#define HARRIER_ITSZ_2MB 0x09 -#define HARRIER_ITSZ_4MB 0x0A -#define HARRIER_ITSZ_8MB 0x0B -#define HARRIER_ITSZ_16MB 0x0C -#define HARRIER_ITSZ_32MB 0x0D -#define HARRIER_ITSZ_64MB 0x0E -#define HARRIER_ITSZ_128MB 0x0F -#define HARRIER_ITSZ_256MB 0x10 -#define HARRIER_ITSZ_512MB 0x11 -#define HARRIER_ITSZ_1GB 0x12 -#define HARRIER_ITSZ_2GB 0x13 - -/* inbound translation offset */ -#define HARRIER_ITOF_SHIFT 0x10 -#define HARRIER_ITOF_MSK 0xffff - -/* inbound translation atttributes */ -#define HARRIER_ITAT_PRE (1<<3) -#define HARRIER_ITAT_RAE (1<<4) -#define HARRIER_ITAT_WPE (1<<5) -#define HARRIER_ITAT_MEM (1<<6) -#define HARRIER_ITAT_ENA (1<<7) -#define HARRIER_ITAT_GBL (1<<16) - -#define HARRIER_LBA_OFF 0x80 -#define HARRIER_LBA_MSK (1<<31) - -#define HARRIER_XCSR_SIZE 1024 - -/* macros to calculate message passing register offsets */ -#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) - -#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 -#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 -#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 -#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 -#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 - -#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) - -#endif /* __ASMPPC_HARRIER_DEFS_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index e6e9c814da6..f13255e0640 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -12,7 +12,9 @@ #ifndef __I2C_PNX_H__ #define __I2C_PNX_H__ -#include <asm/arch/i2c.h> +#include <linux/pm.h> + +struct platform_device; struct i2c_pnx_mif { int ret; /* Return value */ diff --git a/include/linux/ide.h b/include/linux/ide.h index b846bc44a27..87c12ed9695 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -219,18 +219,7 @@ static inline int 
__ide_default_irq(unsigned long base) #include <asm-generic/ide_iops.h> #endif -#ifndef MAX_HWIFS -#if defined(CONFIG_BLACKFIN) || defined(CONFIG_H8300) || defined(CONFIG_XTENSA) -# define MAX_HWIFS 1 -#else -# define MAX_HWIFS 10 -#endif -#endif - -#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) -#undef MAX_HWIFS -#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS -#endif +#define MAX_HWIFS 10 /* Currently only m68k, apus and m8xx need it */ #ifndef IDE_ARCH_ACK_INTR @@ -509,24 +498,33 @@ struct ide_tp_ops { extern const struct ide_tp_ops default_tp_ops; +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * @set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @selectproc: tweaks hardware to select drive + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ struct ide_port_ops { - /* host specific initialization of a device */ void (*init_dev)(ide_drive_t *); - /* routine to program host for PIO mode */ void (*set_pio_mode)(ide_drive_t *, const u8); - /* routine to program host for DMA mode */ void (*set_dma_mode)(ide_drive_t *, const u8); - /* tweaks hardware to select drive */ void (*selectproc)(ide_drive_t *); - /* chipset polling based on hba specifics */ int (*reset_poll)(ide_drive_t *); - /* chipset specific changes to default for device-hba resets */ void (*pre_reset)(ide_drive_t *); - /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); - /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); - /* check host's drive quirk list */ void (*quirkproc)(ide_drive_t *); u8 (*mdma_filter)(ide_drive_t *); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a1630ba0b87..7f4df7c7659 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -506,6 +506,19 @@ struct ieee80211_channel_sw_ie { u8 count; } __attribute__ ((packed)); +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[0]; +} __attribute__ ((packed)); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 2baace2788a..31d8629e75a 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[0]; -} __attribute__((aligned(4))); +} __attribute__((packed)); /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * diff --git a/include/linux/init.h b/include/linux/init.h index 11b84e10605..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int 
reset_devices; diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 62aa4f895ab..58ff4e74b2f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -223,35 +223,6 @@ static inline int disable_irq_wake(unsigned int irq) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif -/* - * Temporary defines for UP kernels, until all code gets fixed. - */ -#ifndef CONFIG_SMP -static inline void __deprecated cli(void) -{ - local_irq_disable(); -} -static inline void __deprecated sti(void) -{ - local_irq_enable(); -} -static inline void __deprecated save_flags(unsigned long *x) -{ - local_save_flags(*x); -} -#define save_flags(x) save_flags(&x) -static inline void __deprecated restore_flags(unsigned long x) -{ - local_irq_restore(x); -} - -static inline void __deprecated save_and_cli(unsigned long *x) -{ - local_irq_save(*x); -} -#define save_and_cli(x) save_and_cli(&x) -#endif /* CONFIG_SMP */ - /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 2cd07cc2968..22d2115458c 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -118,6 +118,10 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(struct resource *res) +{ + return res->end - res->start + 1; +} /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h new file mode 100644 index 00000000000..ec6eb49af2d --- /dev/null +++ b/include/linux/ip_vs.h @@ -0,0 +1,245 @@ +/* + * IP Virtual Server + * data structure and functionality definitions + */ + +#ifndef _IP_VS_H +#define _IP_VS_H + +#include <linux/types.h> /* For __beXX types in userland */ + +#define IP_VS_VERSION_CODE 0x010201 +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF + +/* + * Virtual Service Flags + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ + +/* + * Destination Server Flags + */ +#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ +#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ + +/* + * IPVS sync daemon states + */ +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* + * IPVS socket options + */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) +#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define 
IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON + + +/* + * IPVS Connection Flags + */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ +#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ +#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ +#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ +#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ +#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ +#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ +#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ +#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ + +#define IP_VS_SCHEDNAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 + + +/* + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. + */ +struct ip_vs_service_user { + /* virtual service addresses */ + u_int16_t protocol; + __be32 addr; /* virtual ip address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask */ +}; + + +struct ip_vs_dest_user { + /* destination server address */ + __be32 addr; + __be16 port; + + /* real server options */ + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ +}; + + +/* + * IPVS statistics object (for user space) + */ +struct ip_vs_stats_user +{ + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 outpps; /* current out packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outbps; /* current out byte rate */ +}; + + +/* The argument to IP_VS_SO_GET_INFO */ +struct ip_vs_getinfo { + /* version number */ + unsigned int version; + + /* size of connection hash table */ + unsigned int size; + + /* number of virtual services */ + unsigned int num_services; +}; + + +/* The argument to IP_VS_SO_GET_SERVICE */ +struct ip_vs_service_entry { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* 
service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout */ + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +struct ip_vs_dest_entry { + __be32 addr; /* destination address */ + __be16 port; + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ + + u_int32_t activeconns; /* active connections */ + u_int32_t inactconns; /* inactive connections */ + u_int32_t persistconns; /* persistent connections */ + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +/* The argument to IP_VS_SO_GET_DESTS */ +struct ip_vs_get_dests { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + + /* the real servers */ + struct ip_vs_dest_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_SERVICES */ +struct ip_vs_get_services { + /* number of virtual services */ + unsigned int num_services; + + /* service table */ + struct ip_vs_service_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_TIMEOUT */ +struct ip_vs_timeout_user { + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; +}; + + +/* The argument to IP_VS_SO_GET_DAEMON */ +struct ip_vs_daemon_user { + /* sync daemon state (master/backup) */ + int state; + + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + + /* SyncID we belong to */ + int syncid; +}; + +#endif /* _IP_VS_H */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 57aefa160a9..b9614488744 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -108,8 +108,7 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) static inline void print_ip_sym(unsigned long ip) { - printk("[<%p>]", (void *) ip); - print_symbol(" %s\n", ip); + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index fdbbf72ca2e..2651f805ba6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -75,6 +75,12 @@ extern const char linux_proc_banner[]; */ #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_CRIT "<2>" /* critical conditions */ @@ -102,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -112,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to. 
*/ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 82f88a8a827..32110cede64 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -130,8 +130,8 @@ void vmcoreinfo_append_str(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); unsigned long paddr_vmcoreinfo_note(void); -#define VMCOREINFO_OSRELEASE(name) \ - vmcoreinfo_append_str("OSRELEASE=%s\n", #name) +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 0ea064cbfbc..69511f74f91 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -371,6 +371,7 @@ struct kvm_trace_rec { #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ /* * ioctls for VM fds diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 07d68a8ae8e..8525afc5310 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -121,6 +121,12 @@ struct kvm { struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif }; /* The guest did something we don't support. */ @@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) #define kvm_trace_cleanup() ((void)0) #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Both reads happen under the mmu_lock and both values are + * modified under mmu_lock, so there's no need of smb_rmb() + * here in between, otherwise mmu_notifier_count should be + * read before mmu_notifier_seq, see + * mmu_notifier_invalidate_range_end write side. + */ + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 5b247b8a6b3..06b80337303 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -60,9 +60,9 @@ /* note: prints function name for you */ #ifdef ATA_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #ifdef ATA_VERBOSE_DEBUG -#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define VPRINTK(fmt, args...) #endif /* ATA_VERBOSE_DEBUG */ @@ -71,7 +71,7 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ -#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 @@ -750,6 +750,7 @@ struct ata_port_operations { void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); void (*dev_config)(struct ata_device *dev); @@ -951,6 +952,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, diff --git a/include/linux/list.h b/include/linux/list.h index 453916bc041..db35ef02e74 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -214,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) return !list_empty(head) && (head->next == head->prev); } +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + static inline void __list_splice(const struct list_head *list, - struct list_head *head) + struct list_head *prev, + struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - first->prev = head; - head->next = first; + first->prev = prev; + prev->next = first; - last->next = at; - at->prev = last; + last->next = next; + next->prev = last; } /** - * list_splice - join two lists + * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ @@ -237,7 +277,19 @@ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) - __list_splice(list, head); + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. 
+ */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); } /** @@ -251,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { - __list_splice(list, head); + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -189,6 +190,14 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + struct held_lock { /* * One-way hash of the dependency chain up to this point. We @@ -205,14 +214,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -226,11 +235,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* @@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) @@ -313,8 +326,9 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# else +# define 
lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 5c948f33781..8f2d60da04e 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -37,7 +37,7 @@ */ #define MISDN_MAJOR_VERSION 1 #define MISDN_MINOR_VERSION 0 -#define MISDN_RELEASE 18 +#define MISDN_RELEASE 19 /* primitives for information exchange * generell format @@ -242,7 +242,8 @@ struct mISDNhead { #define TEI_SAPI 63 #define CTRL_SAPI 0 -#define MISDN_CHMAP_SIZE 4 +#define MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) #define SOL_MISDN 0 @@ -275,11 +276,32 @@ struct mISDN_devinfo { u_int Dprotocols; u_int Bprotocols; u_int protocol; - u_long channelmap[MISDN_CHMAP_SIZE]; + u_char channelmap[MISDN_CHMAP_SIZE]; u_int nrbchan; char name[MISDN_MAX_IDLEN]; }; +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << (nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + /* CONTROL_CHANNEL parameters */ #define MISDN_CTRL_GETOP 0x0000 #define MISDN_CTRL_LOOP 0x0001 @@ -405,7 +427,7 @@ struct mISDNdevice { u_int Dprotocols; u_int Bprotocols; u_int nrbchan; - u_long channelmap[MISDN_CHMAP_SIZE]; + u_char channelmap[MISDN_CHMAP_SIZE]; struct list_head bchannels; struct mISDNchannel *teimgr; struct device dev; @@ -430,7 +452,7 @@ struct mISDNstack { #endif }; -/* global alloc/queue dunctions */ +/* global alloc/queue functions */ static inline struct sk_buff * mI_alloc_skb(unsigned int len, gfp_t gfp_mask) diff --git a/include/linux/maple.h b/include/linux/maple.h index d31e36ebb43..c23d3f51ba4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -2,6 +2,7 @@ #define __LINUX_MAPLE_H #include <linux/device.h> +#include <mach/maple.h> extern struct bus_type maple_bus_type; @@ -33,6 +34,7 @@ struct mapleq { void *sendbuf, *recvbuf, *recvbufdcsp; unsigned char length; enum maple_code command; + struct mutex mutex; }; struct maple_devinfo { @@ -49,7 +51,6 @@ struct maple_devinfo { struct maple_device { struct maple_driver *driver; struct mapleq *mq; - void *private_data; void (*callback) (struct mapleq * mq); unsigned long when, interval, function; struct maple_devinfo devinfo; @@ -61,8 +62,6 @@ struct maple_device { struct maple_driver { unsigned long function; - int (*connect) (struct maple_device * dev); - void (*disconnect) (struct maple_device * dev); struct device_driver drv; }; @@ -70,10 +69,17 @@ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq * mq), unsigned long interval, unsigned long function); -int maple_driver_register(struct device_driver *drv); -void maple_add_packet(struct mapleq *mq); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void maple_clear_dev(struct maple_device *mdev); #define to_maple_dev(n) container_of(n, struct maple_device, dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) +#define 
maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + #endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index bb3dd054592..49ef857cdb2 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -1,5 +1,3 @@ -#ifndef MFD_CORE_H -#define MFD_CORE_H /* * drivers/mfd/mfd-core.h * @@ -13,6 +11,9 @@ * */ +#ifndef MFD_CORE_H +#define MFD_CORE_H + #include <linux/platform_device.h> /* @@ -28,7 +29,13 @@ struct mfd_cell { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - void *driver_data; /* driver-specific data */ + /* driver-specific data for MFD-aware "cell" drivers */ + void *driver_data; + + /* platform_data can be used to either pass data to "generic" + driver or as a hook to mfd_cell for the "cell" drivers */ + void *platform_data; + size_t data_size; /* * This resources can be specified relatievly to the parent device. @@ -38,18 +45,11 @@ struct mfd_cell { const struct resource *resources; }; -static inline struct mfd_cell * -mfd_get_cell(struct platform_device *pdev) -{ - return (struct mfd_cell *)pdev->dev.platform_data; -} - -extern int mfd_add_devices( - struct platform_device *parent, - const struct mfd_cell *cells, int n_devs, - struct resource *mem_base, - int irq_base); +extern int mfd_add_devices(struct device *parent, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); -extern void mfd_remove_devices(struct platform_device *parent); +extern void mfd_remove_devices(struct device *parent); #endif diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000000..e83c7f2036f --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,36 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> + +struct t7l66xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000000..fa06e0610b8 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,23 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. 
+ * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index 7cc824a58f7..fec7b3f7a81 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -14,8 +14,8 @@ * published by the Free Software Foundation. */ -#ifndef TC6393XB_H -#define TC6393XB_H +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H /* Also one should provide the CK3P6MI clock */ struct tc6393xb_platform_data { @@ -29,7 +29,7 @@ struct tc6393xb_platform_data { int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); - int irq_base; /* a base for cascaded irq */ + int irq_base; /* base for subdevice irqs */ int gpio_base; struct tmio_nand_data *nand_data; @@ -40,9 +40,6 @@ struct tc6393xb_platform_data { */ #define IRQ_TC6393_NAND 0 #define IRQ_TC6393_MMC 1 -#define IRQ_TC6393_OHCI 2 -#define IRQ_TC6393_SERIAL 3 -#define IRQ_TC6393_FB 4 #define TC6393XB_NR_IRQS 8 diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 9438d8c9ac1..ec612e66391 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -1,6 +1,21 @@ #ifndef MFD_TMIO_H #define MFD_TMIO_H +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + /* * data for the NAND controller */ @@ -10,8 +25,4 @@ struct tmio_nand_data { unsigned int num_partitions; }; -#define TMIO_NAND_CONFIG "tmio-nand-config" -#define TMIO_NAND_CONTROL "tmio-nand-control" -#define TMIO_NAND_IRQ "tmio-nand" - #endif diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 071cf96cf01..6f65b2c8bb8 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -39,17 +39,18 @@ #include <linux/mlx4/doorbell.h> struct mlx4_cqe { - __be32 my_qpn; + __be32 vlan_my_qpn; __be32 immed_rss_invalid; __be32 g_mlpath_rqpn; - u8 sl; - u8 reserved1; + __be16 sl_vid; __be16 rlid; - __be32 ipoib_status; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; __be32 byte_cnt; __be16 wqe_index; __be16 checksum; - u8 reserved2[3]; + u8 reserved[3]; u8 owner_sr_opcode; }; @@ -64,6 +65,11 @@ struct mlx4_err_cqe { }; enum { + MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_QPN_MASK = 0xffffff, +}; + +enum { MLX4_CQE_OWNER_MASK = 0x80, MLX4_CQE_IS_SEND_MASK = 0x40, MLX4_CQE_OPCODE_MASK = 0x1f @@ -86,13 +92,19 @@ enum { }; enum { - MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, - MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, - MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, - MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, - MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, - MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, - MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + 
MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, + MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, }; static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, diff --git a/include/linux/mm.h b/include/linux/mm.h index 6e695eaab4c..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -744,6 +744,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -832,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); -#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST /* * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm (force=0 and doesn't return any vmas). @@ -846,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); -#else -/* - * Should probably be moved to asm-generic, and architectures can include it if - * they don't implement their own get_user_pages_fast. - */ -#define get_user_pages_fast(start, nr_pages, write, pages) \ -({ \ - struct mm_struct *mm = current->mm; \ - int ret; \ - \ - down_read(&mm->mmap_sem); \ - ret = get_user_pages(current, mm, start, nr_pages, \ - write, 0, pages, NULL); \ - up_read(&mm->mmap_sem); \ - \ - ret; \ -}) -#endif - /* * A callback you can register to apply pressure to ageable caches. * @@ -1041,7 +1023,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); @@ -1104,6 +1085,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + #ifdef CONFIG_PROC_FS /* From fs/proc/base.c. 
callers must _not_ hold the mm's exe_file_lock */ extern void added_exe_file_vma(struct mm_struct *mm); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 746f975b58e..386edbe2cb4 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,6 +10,7 @@ #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/completion.h> +#include <linux/cpumask.h> #include <asm/page.h> #include <asm/mmu.h> @@ -253,6 +254,9 @@ struct mm_struct { struct file *exe_file; unsigned long num_exe_file_vmas; #endif +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif }; #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 00000000000..b77486d152c --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,279 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/mm_types.h> + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. + * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. 
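To make the ops above concrete, a minimal registration sketch (illustrative only, not part of this patch; my_mn, my_mn_ops and the callback bodies are hypothetical placeholders for a secondary-MMU driver):

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        /* tear down all secondary-MMU mappings for this mm */
}

static void my_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
                               unsigned long address)
{
        /* flush the secondary-MMU translation of this one page */
}

static const struct mmu_notifier_ops my_mn_ops = {
        .release         = my_release,
        .invalidate_page = my_invalidate_page,
};

static struct mmu_notifier my_mn = {
        .ops = &my_mn_ops,
};

static int my_attach(struct mm_struct *mm)
{
        /* __mmu_notifier_register() is the variant for callers already holding mmap_sem */
        return mmu_notifier_register(&my_mn, mm);
}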
+ */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. + */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). + * 3. 
No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. + * ptep_clear_flush is impleemnted as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. 
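A hedged sketch of the intended call-site pattern (illustrative only, not part of this patch; my_page_referenced_one() is a hypothetical stand-in for reclaim-style code that walks a pte):

static int my_page_referenced_one(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep)
{
        int referenced = 0;

        /* the _notify variant folds secondary-MMU accessed bits into page aging */
        if (ptep_clear_flush_young_notify(vma, address, ptep))
                referenced++;

        return referenced;
}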
+ */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/mount.h b/include/linux/mount.h index b5efaa2132a..30a1d63b6fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -105,7 +105,8 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, struct nameidata; -extern int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, +struct path; +extern int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags, struct list_head *fslist); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 4ed40caff4e..92263654855 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -272,7 +272,11 @@ static inline void mtd_erase_callback(struct erase_info *instr) printk(KERN_INFO args); \ } while(0) #else /* CONFIG_MTD_DEBUG */ -#define DEBUG(n, args...) do { } while(0) +#define DEBUG(n, args...) 
\ + do { \ + if (0) \ + printk(KERN_INFO args); \ + } while(0) #endif /* CONFIG_MTD_DEBUG */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 83f678702df..81774e5facf 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -177,7 +177,9 @@ typedef enum { #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) -#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT)) +/* Large page NAND with SOFT_ECC should support subpage reads */ +#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ + && (chip->page_shift > 9)) /* Mask to zero out the chip options, which come from the id table */ #define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b4d056ceab9..488c56e649b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -61,9 +61,7 @@ struct wireless_dev; #define NET_XMIT_DROP 1 /* skb dropped */ #define NET_XMIT_CN 2 /* congestion notification */ #define NET_XMIT_POLICED 3 /* skb is shot by police */ -#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; - (TC use only - dev_queue_xmit - returns this as NET_XMIT_SUCCESS) */ +#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ @@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n) enum netdev_queue_state_t { __QUEUE_STATE_XOFF, + __QUEUE_STATE_FROZEN, }; struct netdev_queue { @@ -636,7 +635,7 @@ struct net_device unsigned int real_num_tx_queues; unsigned long tx_queue_len; /* Max frames per queue allowed */ - + spinlock_t tx_global_lock; /* * One part is mostly used on xmit path (device) */ @@ -1099,6 +1098,11 @@ static inline int netif_queue_stopped(const struct net_device *dev) return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } +static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); +} + /** * netif_running - test if up * @dev: network device @@ -1475,6 +1479,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) txq->xmit_lock_owner = smp_processor_id(); } +static inline int __netif_tx_trylock(struct netdev_queue *txq) +{ + int ok = spin_trylock(&txq->_xmit_lock); + if (likely(ok)) + txq->xmit_lock_owner = smp_processor_id(); + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1484,12 +1508,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) */ static inline void netif_tx_lock(struct net_device *dev) { - int cpu = smp_processor_id(); unsigned int i; + int cpu; + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. 
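For readers of this hunk, a sketch of the driver-side pattern the frozen bit enables (illustrative only, not part of this patch; my_tx_complete() is hypothetical): a TX-completion handler must not wake a queue that is merely frozen by netif_tx_lock().

static void my_tx_complete(struct net_device *dev, unsigned int idx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

        /* ... reclaim finished descriptors here ... */

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     !netif_tx_queue_frozen(txq))) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    !netif_tx_queue_frozen(txq))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}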
+ */ __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); } } @@ -1499,40 +1534,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev) netif_tx_lock(dev); } -static inline int __netif_tx_trylock(struct netdev_queue *txq) -{ - int ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); - return ok; -} - -static inline int netif_tx_trylock(struct net_device *dev) -{ - return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); -} - -static inline void __netif_tx_unlock(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock(&txq->_xmit_lock); -} - -static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock_bh(&txq->_xmit_lock); -} - static inline void netif_tx_unlock(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_unlock(txq); - } + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. + */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); + } + spin_unlock(&dev->tx_global_lock); } static inline void netif_tx_unlock_bh(struct net_device *dev) @@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; + int cpu; - netif_tx_lock_bh(dev); + local_bh_disable(); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); } - netif_tx_unlock_bh(dev); + local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 22ce29995f1..a049df4f223 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -30,6 +30,9 @@ enum tcp_conntrack { /* Be liberal in window checking */ #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 +/* Has unacknowledged data */ +#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 + struct nf_ct_tcp_flags { u_int8_t flags; u_int8_t mask; diff --git a/include/linux/of.h b/include/linux/of.h index 59a61bdc98b..79886ade070 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h new file mode 100644 index 00000000000..5f71ee8c086 --- /dev/null +++ b/include/linux/of_spi.h @@ -0,0 +1,18 @@ +/* + * OpenFirmware SPI support routines + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * Support routines for deriving SPI device attachments from the device + * tree. 
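A brief sketch of how an OF-aware SPI master driver might use the helper declared below (illustrative only, not part of this patch; my_register_spi() and its arguments are hypothetical):

static int my_register_spi(struct spi_master *master, struct device_node *np)
{
        int ret;

        ret = spi_register_master(master);
        if (ret)
                return ret;

        /* instantiate spi_device children described under the controller node */
        of_register_spi_devices(master, np);
        return 0;
}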
+ */ + +#ifndef __LINUX_OF_SPI_H +#define __LINUX_OF_SPI_H + +#include <linux/of.h> +#include <linux/spi/spi.h> + +extern void of_register_spi_devices(struct spi_master *master, + struct device_node *np); + +#endif /* __LINUX_OF_SPI */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 54590a9a103..c74d3e87531 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \ struct page; /* forward declaration */ -PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) @@ -239,9 +239,6 @@ static inline void __SetPageUptodate(struct page *page) { smp_wmb(); __set_bit(PG_uptodate, &(page)->flags); -#ifdef CONFIG_S390 - page_clear_dirty(page); -#endif } static inline void SetPageUptodate(struct page *page) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a81d8189042..5da31c12101 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -20,6 +20,7 @@ */ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ static inline void mapping_set_error(struct address_space *mapping, int error) { @@ -142,6 +143,29 @@ static inline int page_cache_get_speculative(struct page *page) return 1; } +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON(page_count(page) == 0); + atomic_add(count, &page->_count); + +#else + if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + return 0; +#endif + VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + + return 1; +} + static inline int page_freeze_refs(struct page *page, int count) { return likely(atomic_cmpxchg(&page->_count, count, 0) == count); @@ -226,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -extern void remove_from_page_cache(struct page *page); -extern void __remove_from_page_cache(struct page *page); - -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run SetPageLocked() against it. - */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - ClearPageLocked(page); - return error; -} - /* * Return byte-offset into filesystem object for page. 
*/ @@ -270,13 +271,28 @@ extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); +static inline void set_page_locked(struct page *page) +{ + set_bit(PG_locked, &page->flags); +} + +static inline void clear_page_locked(struct page *page) +{ + clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return !test_and_set_bit(PG_locked, &page->flags); +} + /* * lock_page may only be called if we have the page's inode pinned. */ static inline void lock_page(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page(page); } @@ -288,7 +304,7 @@ static inline void lock_page(struct page *page) static inline int lock_page_killable(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) return __lock_page_killable(page); return 0; } @@ -300,7 +316,7 @@ static inline int lock_page_killable(struct page *page) static inline void lock_page_nosync(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page_nosync(page); } @@ -385,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return ret; } +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void remove_from_page_cache(struct page *page); +extern void __remove_from_page_cache(struct page *page); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run set_page_locked() against it. 
+ */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + clear_page_locked(page); + return error; +} + #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/parser.h b/include/linux/parser.h index cc554ca8bc7..7dcd0507575 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -14,7 +14,7 @@ struct match_token { const char *pattern; }; -typedef const struct match_token match_table_t[]; +typedef struct match_token match_table_t[]; /* Maximum number of arguments that match_token will find in a pattern */ enum {MAX_OPT_ARGS = 3}; diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index a1a1e618e99..91ba0b338b4 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pcie_no_aspm(void); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { @@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } + +static inline void pcie_no_aspm(void) +{ +} #endif #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 1d296d31abe..c0e14008a3c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -124,6 +124,8 @@ enum pci_dev_flags { * generation too. 
*/ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, }; typedef unsigned short __bitwise pci_bus_flags_t; @@ -639,6 +641,7 @@ int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); @@ -678,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus); /* Proper probing supporting hot-pluggable devices */ int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -static inline int __must_check pci_register_driver(struct pci_driver *driver) -{ - return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); -} + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) void pci_unregister_driver(struct pci_driver *dev); void pci_remove_behind_bridge(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 35a78415acc..9ec2bcce8e8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2177,8 +2177,6 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 -#define PCI_VENDOR_ID_RDC 0x17f3 - #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 19958b92990..450684f7eaa 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -374,6 +374,7 @@ #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCTL 8 /* Device Control */ diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 2e4e97bd19f..d74f75ed1e4 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,6 +1,6 @@ /* interface for the pm_qos_power infrastructure of the linux kernel. 
* - * Mark Gross + * Mark Gross <mgross@linux.intel.com> */ #include <linux/list.h> #include <linux/notifier.h> diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 68ed19ccf1f..ea96ead1d39 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -78,6 +78,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index fd31756e1a0..ea7416c901d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -172,7 +172,7 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) child->ptrace = 0; if (unlikely(ptrace)) { child->ptrace = current->ptrace; - __ptrace_link(child, current->parent); + ptrace_link(child, current->parent); } } diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 742187f7a05..ca6b9b5c8d5 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -43,6 +43,8 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path, int remount); +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path); int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int vfs_quota_off(struct super_block *sb, int type, int remount); diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 9f2549ac0e2..c200b9a34af 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -128,6 +128,7 @@ struct mddev_s #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ +#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ int ro; diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..4ab84362272 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/include/linux/rculist.h b/include/linux/rculist.h index b0f39be08b6..eb4443c7e05 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -98,6 +98,34 @@ static inline void list_del_rcu(struct list_head *entry) } /** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. 
+ * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu(). + */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + +/** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h new file mode 100644 index 00000000000..e84b0a9feda --- /dev/null +++ b/include/linux/regulator/bq24022.h @@ -0,0 +1,21 @@ +/* + * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adpater) + * 1-Cell Li-Ion Charger connected via GPIOs. + * + * Copyright (c) 2008 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/** + * bq24022_mach_info - platform data for bq24022 + * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging + * @gpio_iset2: GPIO line connected to the ISET2 pin, used to limit charging current to 100 mA / 500 mA + */ +struct bq24022_mach_info { + int gpio_nce; + int gpio_iset2; +}; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 00000000000..afdc4558bb9 --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,284 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. 
The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. + */ + +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator shut down by software. + * + * NOTE: These events can be OR'ed together when passed into handler. + */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer The regulator consumer for the supply. This will be managed + * by the bulk API. + * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. 
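A consumer-side sketch of the bulk API this structure supports (illustrative only, not part of this patch; the supply names and my_power_on() are hypothetical):

static struct regulator_bulk_data my_supplies[] = {
        { .supply = "vcore" },
        { .supply = "vio" },
};

static int my_power_on(struct device *dev)
{
        int ret;

        ret = regulator_bulk_get(dev, ARRAY_SIZE(my_supplies), my_supplies);
        if (ret)
                return ret;

        ret = regulator_bulk_enable(ARRAY_SIZE(my_supplies), my_supplies);
        if (ret)
                regulator_bulk_free(ARRAY_SIZE(my_supplies), my_supplies);

        return ret;
}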
+ */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); + +/* regulator output control and status */ +int regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); + +int regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_get_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value so the actual return value doesn't matter. 
+ */ + return (struct regulator *)id; +} +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 00000000000..1d712c7172a --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,99 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. + */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#include <linux/device.h> +#include <linux/regulator/consumer.h> + +struct regulator_constraints; +struct regulator_dev; + +/** + * struct regulator_ops - regulator operations. + * + * This struct describes regulator operations. 
+ */ +struct regulator_ops { + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); + int (*get_voltage) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in regulator.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + + /* the operations below are for configuration of regulator state when + * it's parent PMIC enters a global STANBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in regulator.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); +}; + +/* + * Regulators can either control voltage or current. + */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Regulator descriptor + * + */ +struct regulator_desc { + const char *name; + int id; + struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; +}; + + +struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, + void *reg_data); +void regulator_unregister(struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +#endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 00000000000..1387a5d2190 --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,22 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct fixed_voltage_config { + const char *supply_name; + int microvolts; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 00000000000..11e737dbfcf --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,104 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. 
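[Editorial note: for the driver-side interface above, the sketch below shows how a PMIC driver might describe and register one fixed 1.8V LDO. The "LDO1" name, the stubbed-out ops bodies and the assumption that regulator_register() returns an ERR_PTR value on failure are mine; only struct regulator_ops, struct regulator_desc and regulator_register() come from this header.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

/* sketch: one LDO on an imaginary PMIC */
static int my_ldo_enable(struct regulator_dev *rdev)
{
        return 0;       /* would set the enable bit in a PMIC register */
}

static int my_ldo_disable(struct regulator_dev *rdev)
{
        return 0;       /* would clear the enable bit */
}

static int my_ldo_get_voltage(struct regulator_dev *rdev)
{
        return 1800000; /* fixed 1.8V output, reported in microvolts */
}

static struct regulator_ops my_ldo_ops = {
        .enable         = my_ldo_enable,
        .disable        = my_ldo_disable,
        .get_voltage    = my_ldo_get_voltage,
};

static struct regulator_desc my_ldo_desc = {
        .name   = "LDO1",
        .id     = 0,
        .ops    = &my_ldo_ops,
        .type   = REGULATOR_VOLTAGE,
        .owner  = THIS_MODULE,
};

static struct regulator_dev *my_ldo_rdev;

static int my_pmic_register_ldo(void *pmic_drvdata)
{
        my_ldo_rdev = regulator_register(&my_ldo_desc, pmic_drvdata);
        if (IS_ERR(my_ldo_rdev))        /* ERR_PTR error convention assumed */
                return PTR_ERR(my_ldo_rdev);
        return 0;
}
]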
+ */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> + +struct regulator; + +/* + * Regulator operation constraint flags. These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 + +/** + * struct regulator_state - regulator state during low power syatem states + * + * This describes a regulators state during a system wide low power state. + */ +struct regulator_state { + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + */ +struct regulation_constraints { + + char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* constriant flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint iff min == max */ +}; + +int regulator_set_supply(const char *regulator, const char *regulator_supply); + +const char *regulator_get_supply(const char *regulator); + +int regulator_set_machine_constraints(const char *regulator, + struct regulation_constraints *constraints); + +int regulator_set_device_supply(const char *regulator, struct device *dev, + const char *supply); + +int regulator_suspend_prepare(suspend_state_t state); + +#endif diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c5f6e54ec6a..741d1a62cc3 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -68,7 +68,8 @@ enum rfkill_state { * @user_claim_unsupported: Whether the hardware supports exclusive * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exlusively by userspace. - * @mutex: Guards switch state transitions + * @mutex: Guards switch state transitions. It serializes callbacks + * and also protects the state. * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. 
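[Editorial note: tying the regulator sketches together, a board file using the machine interface above might constrain that hypothetical "LDO1" and route a consumer's "Vcc" request to it roughly as below. The voltage limits, the valid-ops choice and my_peripheral_device are all invented; the calls are the ones declared in this machine interface.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>

/* hypothetical consumer device defined elsewhere in the board file */
extern struct platform_device my_peripheral_device;

/* sketch: board-level constraints for the hypothetical "LDO1" */
static struct regulation_constraints my_ldo1_constraints = {
        .min_uV           = 1800000,
        .max_uV           = 3300000,
        .valid_modes_mask = REGULATOR_MODE_NORMAL,
        .valid_ops_mask   = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
};

static void __init my_board_regulator_init(void)
{
        regulator_set_machine_constraints("LDO1", &my_ldo1_constraints);
        /* map the peripheral's "Vcc" supply request onto LDO1 */
        regulator_set_device_supply("LDO1", &my_peripheral_device.dev, "Vcc");
}
]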
@@ -89,12 +90,13 @@ struct rfkill { const char *name; enum rfkill_type type; - enum rfkill_state state; bool user_claim_unsupported; bool user_claim; + /* the mutex serializes callbacks and also protects + * the state */ struct mutex mutex; - + enum rfkill_state state; void *data; int (*toggle_radio)(void *data, enum rfkill_state state); int (*get_state)(void *data, enum rfkill_state *state); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1383692ac5b..69407f85e10 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,14 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ + /* + * NOTE: the LSB of the head.next is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * head must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ struct list_head head; /* List of private "related" vmas */ }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 5270d449ff9..cfb0d87b99f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -87,6 +87,7 @@ struct sched_param { #include <linux/task_io_accounting.h> #include <linux/kobject.h> #include <linux/latencytop.h> +#include <linux/cred.h> #include <asm/processor.h> @@ -1551,16 +1552,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1572,28 +1567,11 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); +#else extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, 
MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/include/linux/serio.h b/include/linux/serio.h index e72716cca57..25641d9e0ea 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -87,11 +87,10 @@ void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); -static inline int serio_register_driver(struct serio_driver *drv) +static inline int __must_check serio_register_driver(struct serio_driver *drv) { return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME); } -int serio_register_driver(struct serio_driver *drv); void serio_unregister_driver(struct serio_driver *drv); static inline int serio_write(struct serio *serio, unsigned char data) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ea44f6621f..358661c9990 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,7 @@ typedef unsigned char *sk_buff_data_t; * @tc_index: Traffic control index * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) + * @do_not_encrypt: set to prevent encryption of this frame * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -316,7 +317,10 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - /* 14 bit hole */ +#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) + __u8 do_not_encrypt:1; +#endif + /* 0/13/14 bit hole */ #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; @@ -897,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len-skb_headlen(skb))) + !__pskb_pull_tail(skb, len - skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -914,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } /** @@ -1317,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len-size); + return skb_pad(skb, len - size); } static inline int skb_add_data(struct sk_buff *skb, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5bad61a93f6..2f5c16b1aac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -46,6 +46,7 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; + unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 5df62ef1280..7a6e6bba4a7 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -214,6 +214,8 @@ enum LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ + LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ + LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ __LINUX_MIB_MAX }; diff --git 
a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 00000000000..b4d9fa6f797 --- /dev/null +++ b/include/linux/spi/orion_spi.h @@ -0,0 +1,17 @@ +/* + * orion_spi.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_SPI_ORION_SPI_H +#define __LINUX_SPI_ORION_SPI_H + +struct orion_spi_info { + u32 tclk; /* no <linux/clk.h> support yet */ +}; + + +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index a9cc29d4665..4be01bb4437 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -778,8 +778,20 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_master(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two + * stage registration sequence for each spi_device. This gives the caller + * some more control over the spi_device structure before it is registered, + * but requires that caller to initialize fields that would otherwise + * be defined using the board info. */ extern struct spi_device * +spi_alloc_device(struct spi_master *master); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); static inline void diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 61e5610ad16..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -183,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 5bfc553bdb2..f1cb0ba6d71 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -5,41 +5,43 @@ (and more). So the "read" side to such a lock is anything which diables preeempt. */ #include <linux/cpu.h> +#include <linux/cpumask.h> #include <asm/system.h> #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) + +/* Deprecated, but useful for transition. 
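[Editorial note: the two-stage spi_alloc_device()/spi_add_device() sequence described in the spi.h hunk above might be used as sketched here. The chip-select number, the clock rate and the fields left as a comment are placeholders; spi_dev_put() is the existing reference-drop helper in spi.h.

#include <linux/spi/spi.h>

/* sketch: register an SPI child in two stages (hypothetical values) */
static int my_add_spi_child(struct spi_master *master)
{
        struct spi_device *spi;
        int ret;

        spi = spi_alloc_device(master);
        if (!spi)
                return -ENOMEM;

        /* fields that spi_board_info would normally provide */
        spi->chip_select = 1;
        spi->max_speed_hz = 1000000;
        /* modalias, mode, controller_data etc. would also be set up here */

        ret = spi_add_device(spi);
        if (ret)
                spi_dev_put(spi);       /* drop the allocated device on failure */
        return ret;
}
]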
*/ +#define ALL_CPUS ~0U + /** - * stop_machine_run: freeze the machine on all CPUs and run this function + * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() - * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This causes a thread to be scheduled on every other cpu, - * each of which disables interrupts, and finally interrupts are disabled - * on the current CPU. The result is that noone is holding a spinlock - * or inside any other preempt-disabled region when @fn() runs. + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that noone is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); +int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); /** - * __stop_machine_run: freeze the machine on all CPUs and run this function + * __stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn - * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This is a special version of the above, which returns the - * thread which has run @fn(): kthread_stop will return the return value - * of @fn(). Used by hotplug cpu. + * Description: This is a special version of the above, which assumes cpus + * won't come or go while it's being called. Used by hotplug cpu. */ -struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu); - +int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); #else -static inline int stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu) +static inline int stop_machine(int (*fn)(void *), void *data, + const cpumask_t *cpus) { int ret; local_irq_disable(); @@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data, return ret; } #endif /* CONFIG_SMP */ + +static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data, + unsigned int cpu) +{ + /* If they don't care which cpu fn runs on, just pick one. */ + if (cpu == NR_CPUS) + return stop_machine(fn, data, NULL); + else if (cpu == ~0U) + return stop_machine(fn, data, &cpu_possible_map); + else { + cpumask_t cpus = cpumask_of_cpu(cpu); + return stop_machine(fn, data, &cpus); + } +} #endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/byteorder.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
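[Editorial note: as a usage note for the reworked stop_machine() interface above, passing a NULL cpumask lets the callback run on any online cpu with every other cpu stopped. A minimal sketch, with the actual work invented:

#include <linux/stop_machine.h>

/* sketch: update shared state while all other cpus are quiesced */
static int my_update(void *data)
{
        int *flag = data;

        /* runs with interrupts off and no cpu inside a preempt-disabled region */
        *flag = 1;
        return 0;
}

static int my_trigger_update(void)
{
        static int flag;

        return stop_machine(my_update, &flag, NULL);
}
]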
+ */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define 
swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b1875582c1a..b48d8196957 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -280,7 +280,7 @@ static inline void tracehook_report_clone(int trace, struct pt_regs *regs, unsigned long clone_flags, pid_t pid, struct task_struct *child) { - if (unlikely(trace)) { + if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { /* * The child starts up with an immediate SIGSTOP. */ @@ -487,14 +487,20 @@ static inline int tracehook_notify_jctl(int notify, int why) return notify || (current->ptrace & PT_PTRACED); } +#define DEATH_REAP -1 +#define DEATH_DELAYED_GROUP_LEADER -2 + /** * tracehook_notify_death - task is dead, ready to notify parent * @task: @current task now exiting * @death_cookie: value to pass to tracehook_report_death() * @group_dead: nonzero if this was the last thread in the group to die * - * Return the signal number to send our parent with do_notify_parent(), or - * zero to send no signal and leave a zombie, or -1 to self-reap right now. + * A return value >= 0 means call do_notify_parent() with that signal + * number. Negative return value can be %DEATH_REAP to self-reap right + * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our + * parent. Note that a return value of 0 means a do_notify_parent() call + * that sends no signal, but still wakes up a parent blocked in wait*(). * * Called with write_lock_irq(&tasklist_lock) held. */ @@ -502,7 +508,7 @@ static inline int tracehook_notify_death(struct task_struct *task, void **death_cookie, int group_dead) { if (task->exit_signal == -1) - return task->ptrace ? SIGCHLD : -1; + return task->ptrace ? SIGCHLD : DEATH_REAP; /* * If something other than our normal parent is ptracing us, then @@ -512,21 +518,21 @@ static inline int tracehook_notify_death(struct task_struct *task, if (thread_group_empty(task) && !ptrace_reparented(task)) return task->exit_signal; - return task->ptrace ? SIGCHLD : 0; + return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; } /** * tracehook_report_death - task is dead and ready to be reaped * @task: @current task now exiting - * @signal: signal number sent to parent, or 0 or -1 + * @signal: return value from tracheook_notify_death() * @death_cookie: value passed back from tracehook_notify_death() * @group_dead: nonzero if this was the last thread in the group to die * * Thread has just become a zombie or is about to self-reap. If positive, * @signal is the signal number just sent to the parent (usually %SIGCHLD). - * If @signal is -1, this thread will self-reap. If @signal is 0, this is - * a delayed_group_leader() zombie. The @death_cookie was passed back by - * tracehook_notify_death(). + * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is + * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. + * The @death_cookie was passed back by tracehook_notify_death(). * * If normal reaping is not inhibited, @task->exit_state might be changing * in parallel. diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f..0924cd9c30f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -110,6 +110,8 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. 
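[Editorial note: a brief usage note on the byteswap helpers defined above. With constant arguments the swab*() macros fold at compile time through __const_swab*(), while variables go through the inline (possibly arch-accelerated) versions. The sketch below uses only interfaces from this header; the function name is invented.

#include <linux/swab.h>

/* sketch: the three flavours of 32-bit byte swapping */
static void swab_demo(__u32 *buf)
{
        __u32 fixed   = swab32(0x12345678);     /* folds to 0x78563412 at build time */
        __u32 swapped = swab32p(buf);           /* byteswapped copy read via pointer */

        swab32s(buf);                           /* swap the pointed-to value in place */
        (void)fixed;
        (void)swapped;
}
]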
+ * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000000..630962c04ca --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,98 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb_hdrc". It encapsulates + * key configuration differences between boards. + */ + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) + */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ + unsigned soft_con:1; /* soft connect required */ + unsigned utm_16:1; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1; /* supports DMA */ + unsigned vendor_req:1; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl; /* vendor control reg width */ + u8 vendor_stat; /* vendor status reg witdh */ + u8 dma_req_chan; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits; +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* Turn device clock on or off */ + int (*set_clock)(struct clk *clock, int is_on); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ diff --git 
a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518..655341d0f53 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,8 @@ #include <linux/mutex.h> #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ -#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 9385a566aed..15a653d4113 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -17,6 +17,21 @@ #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) +#define VID_TYPE_CAPTURE 1 /* Can capture */ +#define VID_TYPE_TUNER 2 /* Can tune */ +#define VID_TYPE_TELETEXT 4 /* Does teletext */ +#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */ +#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */ +#define VID_TYPE_CLIPPING 32 /* Can clip */ +#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */ +#define VID_TYPE_SCALES 128 /* Scalable */ +#define VID_TYPE_MONOCHROME 256 /* Monochrome only */ +#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */ +#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */ +#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ +#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ +#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ + struct video_capability { char name[32]; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 2e66a95e8d3..e466bd54a50 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -71,6 +71,11 @@ */ #define VIDEO_MAX_FRAME 32 +#ifndef __KERNEL__ + +/* These defines are V4L1 specific and should not be used with the V4L2 API! + They will be removed from this header in the future. */ + #define VID_TYPE_CAPTURE 1 /* Can capture */ #define VID_TYPE_TUNER 2 /* Can tune */ #define VID_TYPE_TELETEXT 4 /* Does teletext */ @@ -85,14 +90,15 @@ #define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ #define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ #define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ +#endif /* * M I S C E L L A N E O U S */ /* Four-character-code (FOURCC) */ -#define v4l2_fourcc(a,b,c,d)\ - (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24)) +#define v4l2_fourcc(a, b, c, d)\ + ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) /* * E N U M S @@ -226,8 +232,7 @@ struct v4l2_fract { /* * D R I V E R C A P A B I L I T I E S */ -struct v4l2_capability -{ +struct v4l2_capability { __u8 driver[16]; /* i.e. "bttv" */ __u8 card[32]; /* i.e. 
"Hauppauge WinTV" */ __u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */ @@ -259,8 +264,7 @@ struct v4l2_capability /* * V I D E O I M A G E F O R M A T */ -struct v4l2_pix_format -{ +struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; @@ -272,68 +276,69 @@ struct v4l2_pix_format }; /* Pixel format FOURCC depth Description */ -#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */ -#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */ -#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R','G','B','O') /* 16 RGB-5-5-5 */ -#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R','G','B','P') /* 16 RGB-5-6-5 */ -#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R','G','B','Q') /* 16 RGB-5-5-5 BE */ -#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R','G','B','R') /* 16 RGB-5-6-5 BE */ -#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B','G','R','3') /* 24 BGR-8-8-8 */ -#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R','G','B','3') /* 24 RGB-8-8-8 */ -#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B','G','R','4') /* 32 BGR-8-8-8-8 */ -#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R','G','B','4') /* 32 RGB-8-8-8-8 */ -#define V4L2_PIX_FMT_GREY v4l2_fourcc('G','R','E','Y') /* 8 Greyscale */ -#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y','1','6',' ') /* 16 Greyscale */ -#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P','A','L','8') /* 8 8-bit palette */ -#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y','V','U','9') /* 9 YVU 4:1:0 */ -#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y','V','1','2') /* 12 YVU 4:2:0 */ -#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y','U','Y','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U','Y','V','Y') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4','2','2','P') /* 16 YVU422 planar */ -#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4','1','1','P') /* 16 YVU411 planar */ -#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y','4','1','P') /* 12 YUV 4:1:1 */ -#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y','4','4','4') /* 16 xxxxyyyy uuuuvvvv */ -#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y','U','V','O') /* 16 YUV-5-5-5 */ -#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y','U','V','P') /* 16 YUV-5-6-5 */ -#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y','U','V','4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 
'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ /* two planes -- one Y, one Cr + Cb interleaved */ -#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N','V','1','2') /* 12 Y/CbCr 4:2:0 */ -#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N','V','2','1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ /* The following formats are not defined in the V4L2 specification */ -#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y','U','V','9') /* 9 YUV 4:1:0 */ -#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */ -#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ -#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */ +#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */ /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ -#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ -#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */ -#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ -#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M','J','P','G') /* Motion-JPEG */ -#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J','P','E','G') /* JFIF JPEG */ -#define V4L2_PIX_FMT_DV v4l2_fourcc('d','v','s','d') /* 1394 */ -#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M','P','E','G') /* MPEG-1/2/4 */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */ /* Vendor-specific formats */ -#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W','N','V','A') /* Winnov hw compress */ -#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S','9','1','0') /* SN9C10x compression */ -#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */ -#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */ -#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */ -#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */ -#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */ -#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ /* * F O R M A T E N U M E R A T I O N */ -struct v4l2_fmtdesc -{ +struct v4l2_fmtdesc { __u32 index; /* Format number */ enum v4l2_buf_type type; /* buffer type */ __u32 flags; @@ -349,21 +354,18 @@ struct v4l2_fmtdesc /* * F R A M E S I Z E E N U M E R A T I O N */ -enum v4l2_frmsizetypes -{ +enum v4l2_frmsizetypes { V4L2_FRMSIZE_TYPE_DISCRETE = 1, V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, V4L2_FRMSIZE_TYPE_STEPWISE = 3, }; -struct v4l2_frmsize_discrete -{ +struct v4l2_frmsize_discrete { __u32 width; /* Frame width [pixel] */ __u32 height; /* Frame height [pixel] */ }; -struct v4l2_frmsize_stepwise -{ +struct v4l2_frmsize_stepwise { __u32 min_width; /* Minimum frame width [pixel] */ __u32 max_width; /* Maximum frame width [pixel] */ __u32 step_width; /* Frame width step size [pixel] */ @@ -372,8 +374,7 @@ struct v4l2_frmsize_stepwise __u32 step_height; /* Frame height step size [pixel] */ }; -struct v4l2_frmsizeenum -{ +struct v4l2_frmsizeenum { __u32 index; /* Frame size number */ __u32 pixel_format; /* Pixel format */ __u32 type; /* Frame size type the device supports. 
*/ @@ -389,22 +390,19 @@ struct v4l2_frmsizeenum /* * F R A M E R A T E E N U M E R A T I O N */ -enum v4l2_frmivaltypes -{ +enum v4l2_frmivaltypes { V4L2_FRMIVAL_TYPE_DISCRETE = 1, V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, V4L2_FRMIVAL_TYPE_STEPWISE = 3, }; -struct v4l2_frmival_stepwise -{ +struct v4l2_frmival_stepwise { struct v4l2_fract min; /* Minimum frame interval [s] */ struct v4l2_fract max; /* Maximum frame interval [s] */ struct v4l2_fract step; /* Frame interval step size [s] */ }; -struct v4l2_frmivalenum -{ +struct v4l2_frmivalenum { __u32 index; /* Frame format index */ __u32 pixel_format; /* Pixel format */ __u32 width; /* Frame width */ @@ -423,8 +421,7 @@ struct v4l2_frmivalenum /* * T I M E C O D E */ -struct v4l2_timecode -{ +struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; @@ -449,8 +446,7 @@ struct v4l2_timecode #define V4L2_TC_USERBITS_8BITCHARS 0x0008 /* The above is based on SMPTE timecodes */ -struct v4l2_jpegcompression -{ +struct v4l2_jpegcompression { int quality; int APPn; /* Number of APP segment to be written, @@ -482,16 +478,14 @@ struct v4l2_jpegcompression /* * M E M O R Y - M A P P I N G B U F F E R S */ -struct v4l2_requestbuffers -{ +struct v4l2_requestbuffers { __u32 count; enum v4l2_buf_type type; enum v4l2_memory memory; __u32 reserved[2]; }; -struct v4l2_buffer -{ +struct v4l2_buffer { __u32 index; enum v4l2_buf_type type; __u32 bytesused; @@ -525,13 +519,12 @@ struct v4l2_buffer /* * O V E R L A Y P R E V I E W */ -struct v4l2_framebuffer -{ +struct v4l2_framebuffer { __u32 capability; __u32 flags; /* FIXME: in theory we should pass something like PCI device + memory * region + offset instead of some physical address */ - void* base; + void *base; struct v4l2_pix_format fmt; }; /* Flags for the 'capability' field. 
Read only */
@@ -550,14 +543,12 @@ struct v4l2_framebuffer
 #define V4L2_FBUF_FLAG_GLOBAL_ALPHA	0x0010
 #define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA	0x0020
-struct v4l2_clip
-{
+struct v4l2_clip {
 	struct v4l2_rect	c;
 	struct v4l2_clip	__user *next;
 };
-struct v4l2_window
-{
+struct v4l2_window {
 	struct v4l2_rect	w;
 	enum v4l2_field		field;
 	__u32			chromakey;
@@ -570,8 +561,7 @@ struct v4l2_window
 /*
  *	C A P T U R E   P A R A M E T E R S
  */
-struct v4l2_captureparm
-{
+struct v4l2_captureparm {
 	__u32	capability;	/* Supported modes */
 	__u32	capturemode;	/* Current mode */
 	struct v4l2_fract timeperframe;	/* Time per frame in .1us units */
@@ -584,8 +574,7 @@ struct v4l2_captureparm
 #define V4L2_MODE_HIGHQUALITY	0x0001	/* High quality imaging mode */
 #define V4L2_CAP_TIMEPERFRAME	0x1000	/* timeperframe field is supported */
-struct v4l2_outputparm
-{
+struct v4l2_outputparm {
 	__u32	capability;	/* Supported modes */
 	__u32	outputmode;	/* Current mode */
 	struct v4l2_fract timeperframe;	/* Time per frame in seconds */
@@ -702,8 +691,7 @@ typedef __u64 v4l2_std_id;
 #define V4L2_STD_ALL	(V4L2_STD_525_60	|\
 			 V4L2_STD_625_50)
-struct v4l2_standard
-{
+struct v4l2_standard {
 	__u32		index;
 	v4l2_std_id	id;
 	__u8		name[24];
@@ -715,8 +703,7 @@ struct v4l2_standard
 /*
  *	V I D E O   I N P U T S
  */
-struct v4l2_input
-{
+struct v4l2_input {
 	__u32	index;		/* Which input */
 	__u8	name[32];	/* Label */
 	__u32	type;		/* Type of input */
@@ -753,8 +740,7 @@ struct v4l2_input
 /*
  *	V I D E O   O U T P U T S
  */
-struct v4l2_output
-{
+struct v4l2_output {
 	__u32	index;		/* Which output */
 	__u8	name[32];	/* Label */
 	__u32	type;		/* Type of output */
@@ -771,14 +757,12 @@ struct v4l2_output
 /*
  *	C O N T R O L S
  */
-struct v4l2_control
-{
+struct v4l2_control {
 	__u32	id;
 	__s32	value;
 };
-struct v4l2_ext_control
-{
+struct v4l2_ext_control {
 	__u32 id;
 	__u32 reserved2[2];
 	union {
@@ -788,8 +772,7 @@ struct v4l2_ext_control
 	};
 } __attribute__ ((packed));
-struct v4l2_ext_controls
-{
+struct v4l2_ext_controls {
 	__u32 ctrl_class;
 	__u32 count;
 	__u32 error_idx;
@@ -807,8 +790,7 @@ struct v4l2_ext_controls
 #define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
 /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
-struct v4l2_queryctrl
-{
+struct v4l2_queryctrl {
 	__u32			id;
 	enum v4l2_ctrl_type	type;
 	__u8			name[32];	/* Whatever */
@@ -821,8 +803,7 @@ struct v4l2_queryctrl
 };
 /* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */
-struct v4l2_querymenu
-{
+struct v4l2_querymenu {
 	__u32	id;
 	__u32	index;
 	__u8	name[32];	/* Whatever */
@@ -1104,8 +1085,7 @@ enum v4l2_exposure_auto_type {
 /*
  *	T U N I N G
  */
-struct v4l2_tuner
-{
+struct v4l2_tuner {
 	__u32			index;
 	__u8			name[32];
 	enum v4l2_tuner_type	type;
@@ -1119,8 +1099,7 @@ struct v4l2_tuner
 	__u32			reserved[4];
 };
-struct v4l2_modulator
-{
+struct v4l2_modulator {
 	__u32	index;
 	__u8	name[32];
 	__u32	capability;
@@ -1153,8 +1132,7 @@ struct v4l2_modulator
 #define V4L2_TUNER_MODE_LANG1		0x0003
 #define V4L2_TUNER_MODE_LANG1_LANG2	0x0004
-struct v4l2_frequency
-{
+struct v4l2_frequency {
 	__u32			tuner;
 	enum v4l2_tuner_type	type;
 	__u32			frequency;
@@ -1172,8 +1150,7 @@ struct v4l2_hw_freq_seek {
 /*
  *	A U D I O
  */
-struct v4l2_audio
-{
+struct v4l2_audio {
 	__u32	index;
 	__u8	name[32];
 	__u32	capability;
@@ -1188,8 +1165,7 @@ struct v4l2_audio
 /* Flags for the 'mode' field */
 #define V4L2_AUDMODE_AVL	0x00001
-struct v4l2_audioout
-{
+struct v4l2_audioout {
 	__u32	index;
 	__u8	name[32];
 	__u32	capability;
@@ -1253,8 +1229,7 @@ struct v4l2_encoder_cmd {
  */
 /* Raw VBI */
-struct v4l2_vbi_format
-{
+struct v4l2_vbi_format {
 	__u32	sampling_rate;		/* in 1 Hz */
 	__u32	offset;
 	__u32	samples_per_line;
@@ -1266,8 +1241,8 @@ struct v4l2_vbi_format
 };
 /* VBI flags */
-#define V4L2_VBI_UNSYNC		(1<< 0)
-#define V4L2_VBI_INTERLACED	(1<< 1)
+#define V4L2_VBI_UNSYNC		(1 << 0)
+#define V4L2_VBI_INTERLACED	(1 << 1)
 /* Sliced VBI
 *
@@ -1276,8 +1251,7 @@ struct v4l2_vbi_format
 *   notice in the definitive implementation.
 */
-struct v4l2_sliced_vbi_format
-{
+struct v4l2_sliced_vbi_format {
 	__u16	service_set;
 	/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
 	   service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
@@ -1301,8 +1275,7 @@ struct v4l2_sliced_vbi_format
 #define V4L2_SLICED_VBI_525	(V4L2_SLICED_CAPTION_525)
 #define V4L2_SLICED_VBI_625	(V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
-struct v4l2_sliced_vbi_cap
-{
+struct v4l2_sliced_vbi_cap {
 	__u16	service_set;
 	/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
 	   service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
@@ -1313,8 +1286,7 @@ struct v4l2_sliced_vbi_cap
 	__u32	reserved[3];	/* must be 0 */
 };
-struct v4l2_sliced_vbi_data
-{
+struct v4l2_sliced_vbi_data {
 	__u32	id;
 	__u32	field;		/* 0: first field, 1: second field */
 	__u32	line;		/* 1-23 */
@@ -1328,27 +1300,23 @@ struct v4l2_sliced_vbi_data
 /*	Stream data format
 */
-struct v4l2_format
-{
+struct v4l2_format {
 	enum v4l2_buf_type type;
-	union
-	{
-		struct v4l2_pix_format		pix;	// V4L2_BUF_TYPE_VIDEO_CAPTURE
-		struct v4l2_window		win;	// V4L2_BUF_TYPE_VIDEO_OVERLAY
-		struct v4l2_vbi_format		vbi;	// V4L2_BUF_TYPE_VBI_CAPTURE
-		struct v4l2_sliced_vbi_format	sliced;	// V4L2_BUF_TYPE_SLICED_VBI_CAPTURE
-		__u8	raw_data[200];			// user-defined
+	union {
+		struct v4l2_pix_format		pix;	/* V4L2_BUF_TYPE_VIDEO_CAPTURE */
+		struct v4l2_window		win;	/* V4L2_BUF_TYPE_VIDEO_OVERLAY */
+		struct v4l2_vbi_format		vbi;	/* V4L2_BUF_TYPE_VBI_CAPTURE */
+		struct v4l2_sliced_vbi_format	sliced;	/* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
+		__u8	raw_data[200];			/* user-defined */
 	} fmt;
 };
 /*	Stream type-dependent parameters
 */
-struct v4l2_streamparm
-{
+struct v4l2_streamparm {
 	enum v4l2_buf_type type;
-	union
-	{
+	union {
 		struct v4l2_captureparm	capture;
 		struct v4l2_outputparm	output;
 		__u8	raw_data[200];	/* user-defined */
@@ -1386,92 +1354,86 @@ struct v4l2_chip_ident {
  *	I O C T L   C O D E S   F O R   V I D E O   D E V I C E S
 *
 */
-#define VIDIOC_QUERYCAP		_IOR ('V',  0, struct v4l2_capability)
-#define VIDIOC_RESERVED		_IO  ('V',  1)
-#define VIDIOC_ENUM_FMT		_IOWR ('V',  2, struct v4l2_fmtdesc)
-#define VIDIOC_G_FMT		_IOWR ('V',  4, struct v4l2_format)
-#define VIDIOC_S_FMT		_IOWR ('V',  5, struct v4l2_format)
-#define VIDIOC_REQBUFS		_IOWR ('V',  8, struct v4l2_requestbuffers)
-#define VIDIOC_QUERYBUF		_IOWR ('V',  9, struct v4l2_buffer)
-#define VIDIOC_G_FBUF		_IOR ('V', 10, struct v4l2_framebuffer)
-#define VIDIOC_S_FBUF		_IOW ('V', 11, struct v4l2_framebuffer)
-#define VIDIOC_OVERLAY		_IOW ('V', 14, int)
-#define VIDIOC_QBUF		_IOWR ('V', 15, struct v4l2_buffer)
-#define VIDIOC_DQBUF		_IOWR ('V', 17, struct v4l2_buffer)
-#define VIDIOC_STREAMON		_IOW ('V', 18, int)
-#define VIDIOC_STREAMOFF	_IOW ('V', 19, int)
-#define VIDIOC_G_PARM		_IOWR ('V', 21, struct v4l2_streamparm)
-#define VIDIOC_S_PARM		_IOWR ('V', 22, struct v4l2_streamparm)
-#define VIDIOC_G_STD		_IOR ('V', 23, v4l2_std_id)
-#define VIDIOC_S_STD		_IOW ('V', 24, v4l2_std_id)
-#define VIDIOC_ENUMSTD		_IOWR ('V', 25, struct v4l2_standard)
-#define VIDIOC_ENUMINPUT	_IOWR ('V', 26, struct v4l2_input)
-#define VIDIOC_G_CTRL		_IOWR ('V', 27, struct v4l2_control)
-#define VIDIOC_S_CTRL		_IOWR ('V', 28, struct v4l2_control)
-#define VIDIOC_G_TUNER		_IOWR ('V', 29, struct v4l2_tuner)
-#define VIDIOC_S_TUNER		_IOW ('V', 30, struct v4l2_tuner)
-#define VIDIOC_G_AUDIO		_IOR ('V', 33, struct v4l2_audio)
-#define VIDIOC_S_AUDIO		_IOW ('V', 34, struct v4l2_audio)
-#define VIDIOC_QUERYCTRL	_IOWR ('V', 36, struct v4l2_queryctrl)
-#define VIDIOC_QUERYMENU	_IOWR ('V', 37, struct v4l2_querymenu)
-#define VIDIOC_G_INPUT		_IOR ('V', 38, int)
-#define VIDIOC_S_INPUT		_IOWR ('V', 39, int)
-#define VIDIOC_G_OUTPUT		_IOR ('V', 46, int)
-#define VIDIOC_S_OUTPUT		_IOWR ('V', 47, int)
-#define VIDIOC_ENUMOUTPUT	_IOWR ('V', 48, struct v4l2_output)
-#define VIDIOC_G_AUDOUT		_IOR ('V', 49, struct v4l2_audioout)
-#define VIDIOC_S_AUDOUT		_IOW ('V', 50, struct v4l2_audioout)
-#define VIDIOC_G_MODULATOR	_IOWR ('V', 54, struct v4l2_modulator)
-#define VIDIOC_S_MODULATOR	_IOW ('V', 55, struct v4l2_modulator)
-#define VIDIOC_G_FREQUENCY	_IOWR ('V', 56, struct v4l2_frequency)
-#define VIDIOC_S_FREQUENCY	_IOW ('V', 57, struct v4l2_frequency)
-#define VIDIOC_CROPCAP		_IOWR ('V', 58, struct v4l2_cropcap)
-#define VIDIOC_G_CROP		_IOWR ('V', 59, struct v4l2_crop)
-#define VIDIOC_S_CROP		_IOW ('V', 60, struct v4l2_crop)
-#define VIDIOC_G_JPEGCOMP	_IOR ('V', 61, struct v4l2_jpegcompression)
-#define VIDIOC_S_JPEGCOMP	_IOW ('V', 62, struct v4l2_jpegcompression)
-#define VIDIOC_QUERYSTD		_IOR ('V', 63, v4l2_std_id)
-#define VIDIOC_TRY_FMT		_IOWR ('V', 64, struct v4l2_format)
-#define VIDIOC_ENUMAUDIO	_IOWR ('V', 65, struct v4l2_audio)
-#define VIDIOC_ENUMAUDOUT	_IOWR ('V', 66, struct v4l2_audioout)
-#define VIDIOC_G_PRIORITY	_IOR ('V', 67, enum v4l2_priority)
-#define VIDIOC_S_PRIORITY	_IOW ('V', 68, enum v4l2_priority)
-#define VIDIOC_G_SLICED_VBI_CAP	_IOWR ('V', 69, struct v4l2_sliced_vbi_cap)
-#define VIDIOC_LOG_STATUS	_IO  ('V', 70)
-#define VIDIOC_G_EXT_CTRLS	_IOWR ('V', 71, struct v4l2_ext_controls)
-#define VIDIOC_S_EXT_CTRLS	_IOWR ('V', 72, struct v4l2_ext_controls)
-#define VIDIOC_TRY_EXT_CTRLS	_IOWR ('V', 73, struct v4l2_ext_controls)
+#define VIDIOC_QUERYCAP		_IOR('V',  0, struct v4l2_capability)
+#define VIDIOC_RESERVED		_IO('V',  1)
+#define VIDIOC_ENUM_FMT		_IOWR('V',  2, struct v4l2_fmtdesc)
+#define VIDIOC_G_FMT		_IOWR('V',  4, struct v4l2_format)
+#define VIDIOC_S_FMT		_IOWR('V',  5, struct v4l2_format)
+#define VIDIOC_REQBUFS		_IOWR('V',  8, struct v4l2_requestbuffers)
+#define VIDIOC_QUERYBUF		_IOWR('V',  9, struct v4l2_buffer)
+#define VIDIOC_G_FBUF		_IOR('V', 10, struct v4l2_framebuffer)
+#define VIDIOC_S_FBUF		_IOW('V', 11, struct v4l2_framebuffer)
+#define VIDIOC_OVERLAY		_IOW('V', 14, int)
+#define VIDIOC_QBUF		_IOWR('V', 15, struct v4l2_buffer)
+#define VIDIOC_DQBUF		_IOWR('V', 17, struct v4l2_buffer)
+#define VIDIOC_STREAMON		_IOW('V', 18, int)
+#define VIDIOC_STREAMOFF	_IOW('V', 19, int)
+#define VIDIOC_G_PARM		_IOWR('V', 21, struct v4l2_streamparm)
+#define VIDIOC_S_PARM		_IOWR('V', 22, struct v4l2_streamparm)
+#define VIDIOC_G_STD		_IOR('V', 23, v4l2_std_id)
+#define VIDIOC_S_STD		_IOW('V', 24, v4l2_std_id)
+#define VIDIOC_ENUMSTD		_IOWR('V', 25, struct v4l2_standard)
+#define VIDIOC_ENUMINPUT	_IOWR('V', 26, struct v4l2_input)
+#define VIDIOC_G_CTRL		_IOWR('V', 27, struct v4l2_control)
+#define VIDIOC_S_CTRL		_IOWR('V', 28, struct v4l2_control)
+#define VIDIOC_G_TUNER		_IOWR('V', 29, struct v4l2_tuner)
+#define VIDIOC_S_TUNER		_IOW('V', 30, struct v4l2_tuner)
+#define VIDIOC_G_AUDIO		_IOR('V', 33, struct v4l2_audio)
+#define VIDIOC_S_AUDIO		_IOW('V', 34, struct v4l2_audio)
+#define VIDIOC_QUERYCTRL	_IOWR('V', 36, struct v4l2_queryctrl)
+#define VIDIOC_QUERYMENU	_IOWR('V', 37, struct v4l2_querymenu)
+#define VIDIOC_G_INPUT		_IOR('V', 38, int)
+#define VIDIOC_S_INPUT		_IOWR('V', 39, int)
+#define VIDIOC_G_OUTPUT		_IOR('V', 46, int)
+#define VIDIOC_S_OUTPUT		_IOWR('V', 47, int)
+#define VIDIOC_ENUMOUTPUT	_IOWR('V', 48, struct v4l2_output)
+#define VIDIOC_G_AUDOUT		_IOR('V', 49, struct v4l2_audioout)
+#define VIDIOC_S_AUDOUT		_IOW('V', 50, struct v4l2_audioout)
+#define VIDIOC_G_MODULATOR	_IOWR('V', 54, struct v4l2_modulator)
+#define VIDIOC_S_MODULATOR	_IOW('V', 55, struct v4l2_modulator)
+#define VIDIOC_G_FREQUENCY	_IOWR('V', 56, struct v4l2_frequency)
+#define VIDIOC_S_FREQUENCY	_IOW('V', 57, struct v4l2_frequency)
+#define VIDIOC_CROPCAP		_IOWR('V', 58, struct v4l2_cropcap)
+#define VIDIOC_G_CROP		_IOWR('V', 59, struct v4l2_crop)
+#define VIDIOC_S_CROP		_IOW('V', 60, struct v4l2_crop)
+#define VIDIOC_G_JPEGCOMP	_IOR('V', 61, struct v4l2_jpegcompression)
+#define VIDIOC_S_JPEGCOMP	_IOW('V', 62, struct v4l2_jpegcompression)
+#define VIDIOC_QUERYSTD		_IOR('V', 63, v4l2_std_id)
+#define VIDIOC_TRY_FMT		_IOWR('V', 64, struct v4l2_format)
+#define VIDIOC_ENUMAUDIO	_IOWR('V', 65, struct v4l2_audio)
+#define VIDIOC_ENUMAUDOUT	_IOWR('V', 66, struct v4l2_audioout)
+#define VIDIOC_G_PRIORITY	_IOR('V', 67, enum v4l2_priority)
+#define VIDIOC_S_PRIORITY	_IOW('V', 68, enum v4l2_priority)
+#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
+#define VIDIOC_LOG_STATUS	_IO('V', 70)
+#define VIDIOC_G_EXT_CTRLS	_IOWR('V', 71, struct v4l2_ext_controls)
+#define VIDIOC_S_EXT_CTRLS	_IOWR('V', 72, struct v4l2_ext_controls)
+#define VIDIOC_TRY_EXT_CTRLS	_IOWR('V', 73, struct v4l2_ext_controls)
 #if 1
-#define VIDIOC_ENUM_FRAMESIZES	_IOWR ('V', 74, struct v4l2_frmsizeenum)
-#define VIDIOC_ENUM_FRAMEINTERVALS	_IOWR ('V', 75, struct v4l2_frmivalenum)
-#define VIDIOC_G_ENC_INDEX	_IOR ('V', 76, struct v4l2_enc_idx)
-#define VIDIOC_ENCODER_CMD	_IOWR ('V', 77, struct v4l2_encoder_cmd)
-#define VIDIOC_TRY_ENCODER_CMD	_IOWR ('V', 78, struct v4l2_encoder_cmd)
+#define VIDIOC_ENUM_FRAMESIZES	_IOWR('V', 74, struct v4l2_frmsizeenum)
+#define VIDIOC_ENUM_FRAMEINTERVALS	_IOWR('V', 75, struct v4l2_frmivalenum)
+#define VIDIOC_G_ENC_INDEX	_IOR('V', 76, struct v4l2_enc_idx)
+#define VIDIOC_ENCODER_CMD	_IOWR('V', 77, struct v4l2_encoder_cmd)
+#define VIDIOC_TRY_ENCODER_CMD	_IOWR('V', 78, struct v4l2_encoder_cmd)
 /* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
-#define VIDIOC_DBG_S_REGISTER	_IOW ('V', 79, struct v4l2_register)
-#define VIDIOC_DBG_G_REGISTER	_IOWR ('V', 80, struct v4l2_register)
+#define VIDIOC_DBG_S_REGISTER	_IOW('V', 79, struct v4l2_register)
+#define VIDIOC_DBG_G_REGISTER	_IOWR('V', 80, struct v4l2_register)
-#define VIDIOC_G_CHIP_IDENT	_IOWR ('V', 81, struct v4l2_chip_ident)
+#define VIDIOC_G_CHIP_IDENT	_IOWR('V', 81, struct v4l2_chip_ident)
 #endif
-#define VIDIOC_S_HW_FREQ_SEEK	_IOW ('V', 82, struct v4l2_hw_freq_seek)
+#define VIDIOC_S_HW_FREQ_SEEK	_IOW('V', 82, struct v4l2_hw_freq_seek)
 #ifdef __OLD_VIDIOC_
 /* for compatibility, will go away some day */
-#define VIDIOC_OVERLAY_OLD	_IOWR ('V', 14, int)
-#define VIDIOC_S_PARM_OLD	_IOW ('V', 22, struct v4l2_streamparm)
-#define VIDIOC_S_CTRL_OLD	_IOW ('V', 28, struct v4l2_control)
-#define VIDIOC_G_AUDIO_OLD	_IOWR ('V', 33, struct v4l2_audio)
-#define VIDIOC_G_AUDOUT_OLD	_IOWR ('V', 49, struct v4l2_audioout)
-#define VIDIOC_CROPCAP_OLD	_IOR ('V', 58, struct v4l2_cropcap)
+#define VIDIOC_OVERLAY_OLD	_IOWR('V', 14, int)
+#define VIDIOC_S_PARM_OLD	_IOW('V', 22, struct v4l2_streamparm)
+#define VIDIOC_S_CTRL_OLD	_IOW('V', 28, struct v4l2_control)
+#define VIDIOC_G_AUDIO_OLD	_IOWR('V', 33, struct v4l2_audio)
+#define VIDIOC_G_AUDOUT_OLD	_IOWR('V', 49, struct v4l2_audioout)
+#define VIDIOC_CROPCAP_OLD	_IOR('V', 58, struct v4l2_cropcap)
 #endif
 #define BASE_VIDIOC_PRIVATE	192	/* 192-255 are private */
 #endif /* __LINUX_VIDEODEV2_H */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
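
The videodev2.h hunks above are a pure coding-style cleanup (brace placement, C89 comments, and no space between the ioctl macro and its argument list); the ioctl numbers and structure layouts are unchanged. As a usage sketch only, not part of the patch, here is how the VIDIOC_QUERYCAP and VIDIOC_G_FMT ioctls and the type-selected union in struct v4l2_format are typically consumed from user space; the device node /dev/video0 and the minimal error handling are assumptions:

/* Illustrative sketch only; assumes a V4L2 capture device at /dev/video0. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, card: %s\n",
		       (char *)cap.driver, (char *)cap.card);

	/* The union inside struct v4l2_format is selected by .type;
	 * fmt.fmt.pix is the valid member for VIDEO_CAPTURE. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("capture format: %ux%u\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height);

	close(fd);
	return 0;
}

The union member that is valid after the call (fmt.fmt.pix here) is determined entirely by the buffer type written into fmt.type beforehand, which is what the new /* V4L2_BUF_TYPE_* */ comments in the hunk above document.
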
diff --git a/include/linux/videotext.h b/include/linux/videotext.h
index 018f92047ff..3e68c8d1c7f 100644
--- a/include/linux/videotext.h
+++ b/include/linux/videotext.h
@@ -45,10 +45,10 @@
 #define VTXIOCCLRCACHE_OLD	0x710b	/* clear cache on VTX-interface (if avail.) */
 #define VTXIOCSETVIRT_OLD	0x710c	/* turn on virtual mode (this disables TV-display) */
-/* 
+/*
  * Definitions for VTXIOCGETINFO
  */
- 
+
 #define SAA5243 0
 #define SAA5246 1
 #define SAA5249 2
@@ -57,10 +57,10 @@
 typedef struct {
 	int version_major, version_minor;	/* version of driver; if version_major changes, driver */
-	/* is not backward compatible!!! CHECK THIS!!! */
+						/* is not backward compatible!!! CHECK THIS!!! */
 	int numpages;		/* number of page-buffers of vtx-chipset */
 	int cct_type;		/* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or
-	 * SAA5249) */
+				 * SAA5249) */
 }
 vtx_info_t;
@@ -81,7 +81,7 @@ vtx_info_t;
 #define PGMASK_HOUR		(HR_TEN | HR_UNIT)
 #define PGMASK_MINUTE		(MIN_TEN | MIN_UNIT)
-typedef struct 
+typedef struct
 {
 	int page;	/* number of requested page (hexadecimal) */
 	int hour;	/* requested hour (hexadecimal) */
@@ -98,11 +98,11 @@ vtx_pagereq_t;
 /*
  * Definitions for VTXIOC{GETSTAT,PUTSTAT}
  */
- 
+
 #define VTX_PAGESIZE	(40 * 24)
 #define VTX_VIRTUALSIZE	(40 * 49)
-typedef struct 
+typedef struct
 {
 	int pagenum;	/* number of page (hexadecimal) */
 	int hour;	/* hour (hexadecimal) */
@@ -121,5 +121,5 @@ typedef struct
 	unsigned hamming : 1;	/* hamming-error occurred */
 } vtx_pageinfo_t;
- 
+
 #endif /* _VTX_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 14c0e91be9b..1c78d56c57e 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -74,7 +74,7 @@ void con_protect_unimap(struct vc_data *vc, int rdonly);
 int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
 #define vc_translate(vc, c) ((vc)->vc_translate[(c) |			\
-					(vc)->vc_toggle_meta ? 0x80 : 0])
+					((vc)->vc_toggle_meta ? 0x80 : 0)])
 #else
 #define con_set_trans_old(arg) (0)
 #define con_get_trans_old(arg) (-EINVAL)
@@ -86,6 +86,7 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
 #define con_copy_unimap(d, s) (0)
 #define con_get_unimap(vc, ct, uct, list) (-EINVAL)
 #define con_free_unimap(vc) do { ; } while (0)
+#define con_protect_unimap(vc, rdonly) do { ; } while (0)
 #define vc_translate(vc, c) (c)
 #endif
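
The vt_kern.h hunk above contains the one behavioural fix in this group: in C, bitwise OR binds tighter than the conditional operator, so the old vc_translate() body indexed the translation table with ((c) | (vc)->vc_toggle_meta) ? 0x80 : 0 instead of OR-ing 0x80 into the character when the meta flag is set. A stand-alone sketch of the same precedence trap, with made-up values rather than kernel state:

/* Illustration of the precedence fix in vc_translate(); toggle_meta
 * and c are arbitrary example values, not kernel data. */
#include <stdio.h>

int main(void)
{
	int c = 0x41;		/* 'A' */
	int toggle_meta = 1;

	/* Old form: parsed as (c | toggle_meta) ? 0x80 : 0, i.e. 0x80. */
	int buggy = c | toggle_meta ? 0x80 : 0;

	/* Fixed form: OR 0x80 into c only when toggle_meta is set. */
	int fixed = c | (toggle_meta ? 0x80 : 0);

	printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);	/* 0x80 vs 0xc1 */
	return 0;
}

With the added parentheses the macro again yields c | 0x80 when vc_toggle_meta is set and plain c otherwise, which is the intended meta-key behaviour.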