author | Ingo Molnar <mingo@elte.hu> | 2008-08-15 18:15:17 +0200
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-15 18:15:17 +0200
commit | f3efbe582b5396d134024c03a5fa253f2a85d9a6 (patch) |
tree | e4e15b7567b82d24cb1e7327398286a2b88df04c | /include/linux
parent | 05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660 (diff) |
parent | b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff) |
Merge branch 'linus' into x86/gart
Diffstat (limited to 'include/linux')
244 files changed, 6671 insertions, 1863 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 71d70d1fbce..327f60658d9 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -97,6 +97,7 @@ header-y += ioctl.h header-y += ip6_tunnel.h header-y += ipmi_msgdefs.h header-y += ipsec.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += iso_fs.h @@ -189,7 +190,6 @@ unifdef-y += connector.h unifdef-y += cuda.h unifdef-y += cyclades.h unifdef-y += dccp.h -unifdef-y += dirent.h unifdef-y += dlm.h unifdef-y += dlm_plock.h unifdef-y += edd.h @@ -256,7 +256,9 @@ unifdef-y += kd.h unifdef-y += kernelcapi.h unifdef-y += kernel.h unifdef-y += keyboard.h +ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/kvm.h),) unifdef-y += kvm.h +endif unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h @@ -354,6 +356,7 @@ unifdef-y += virtio_balloon.h unifdef-y += virtio_console.h unifdef-y += virtio_pci.h unifdef-y += virtio_ring.h +unifdef-y += virtio_rng.h unifdef-y += vt.h unifdef-y += wait.h unifdef-y += wanrouter.h diff --git a/include/linux/acct.h b/include/linux/acct.h index e8cae54e8d8..882dc724876 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h @@ -120,17 +120,20 @@ struct acct_v3 struct vfsmount; struct super_block; struct pacct_struct; +struct pid_namespace; extern void acct_auto_close_mnt(struct vfsmount *m); extern void acct_auto_close(struct super_block *sb); extern void acct_init_pacct(struct pacct_struct *pacct); extern void acct_collect(long exitcode, int group_dead); extern void acct_process(void); +extern void acct_exit_ns(struct pid_namespace *); #else #define acct_auto_close_mnt(x) do { } while (0) #define acct_auto_close(x) do { } while (0) #define acct_init_pacct(x) do { } while (0) #define acct_collect(x,y) do { } while (0) #define acct_process() do { } while (0) +#define acct_exit_ns(ns) do { } while (0) #endif /* diff --git a/include/linux/acpi.h b/include/linux/acpi.h index a1717763937..702f79dad16 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -236,6 +236,7 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, const char *name); #ifdef CONFIG_PM_SLEEP +void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); #endif /* CONFIG_PM_SLEEP */ #else /* CONFIG_ACPI */ diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 972b12bcfb3..2b8df8b420f 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -30,6 +30,8 @@ #ifndef _AGP_BACKEND_H #define _AGP_BACKEND_H 1 +#include <linux/list.h> + enum chipset_type { NOT_SUPPORTED, SUPPORTED, @@ -78,6 +80,8 @@ struct agp_memory { bool is_bound; bool is_flushed; bool vmalloc_flag; + /* list of agp_memory mapped to the aperture */ + struct list_head mapped_list; }; #define AGP_NORMAL_MEMORY 0 @@ -96,6 +100,7 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); +extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); diff --git a/include/linux/aio.h b/include/linux/aio.h index b51ddd28444..09b276c3522 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -7,7 +7,6 @@ #include <linux/uio.h> #include <asm/atomic.h> -#include <linux/uio.h> #define AIO_MAXSEGS 4 
#define AIO_KIOGRP_NR_ATOMIC 8 diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 6129e58ca7c..e0a0cdc2da4 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -9,7 +9,7 @@ #define _LINUX_ANON_INODES_H int anon_inode_getfd(const char *name, const struct file_operations *fops, - void *priv); + void *priv, int flags); #endif /* _LINUX_ANON_INODES_H */ diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index eb640f0acfa..0f50d4cc436 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -101,21 +101,14 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, /** * async_tx_sync_epilog - actions to take if an operation is run synchronously - * @flags: async_tx flags - * @depend_tx: transaction depends on depend_tx * @cb_fn: function to call when the transaction completes * @cb_fn_param: parameter to pass to the callback routine */ static inline void -async_tx_sync_epilog(unsigned long flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param) +async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param) { if (cb_fn) cb_fn(cb_fn_param); - - if (depend_tx && (flags & ASYNC_TX_DEP_ACK)) - async_tx_ack(depend_tx); } void @@ -152,4 +145,6 @@ struct dma_async_tx_descriptor * async_trigger_callback(enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, dma_async_tx_callback cb_fn, void *cb_fn_param); + +void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/atmel-pwm-bl.h b/include/linux/atmel-pwm-bl.h new file mode 100644 index 00000000000..0153a47806c --- /dev/null +++ b/include/linux/atmel-pwm-bl.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * Driver for the AT32AP700X PS/2 controller (PSIF). + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __INCLUDE_ATMEL_PWM_BL_H +#define __INCLUDE_ATMEL_PWM_BL_H + +/** + * struct atmel_pwm_bl_platform_data + * @pwm_channel: which PWM channel in the PWM module to use. + * @pwm_frequency: PWM frequency to generate, the driver will try to be as + * close as the prescaler allows. + * @pwm_compare_max: value to use in the PWM channel compare register. + * @pwm_duty_max: maximum duty cycle value, must be less than or equal to + * pwm_compare_max. + * @pwm_duty_min: minimum duty cycle value, must be less than pwm_duty_max. + * @pwm_active_low: set to one if the low part of the PWM signal increases the + * brightness of the backlight. + * @gpio_on: GPIO line to control the backlight on/off, set to -1 if not used. + * @on_active_low: set to one if the on/off signal is on when GPIO is low. + * + * This struct must be added to the platform device in the board code. It is + * used by the atmel-pwm-bl driver to setup the GPIO to control on/off and the + * PWM device. 
+ */ +struct atmel_pwm_bl_platform_data { + unsigned int pwm_channel; + unsigned int pwm_frequency; + unsigned int pwm_compare_max; + unsigned int pwm_duty_max; + unsigned int pwm_duty_min; + unsigned int pwm_active_low; + int gpio_on; + unsigned int on_active_low; +}; + +#endif /* __INCLUDE_ATMEL_PWM_BL_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 8b82974bdc1..6272a395d43 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -286,7 +286,6 @@ #define AUDIT_ARCH_SHEL64 (EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_SPARC (EM_SPARC) #define AUDIT_ARCH_SPARC64 (EM_SPARCV9|__AUDIT_ARCH_64BIT) -#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE) #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 31a29541b50..b785c6f8644 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -98,8 +98,6 @@ union autofs_v5_packet_union { #define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI #define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) -#define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) -#define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) #define AUTOFS_IOC_ASKUMOUNT _IOR(0x93,0x70,int) diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index 0da17d14fd1..d7afa9dd663 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -26,9 +26,13 @@ #define AT_SECURE 23 /* secure mode boolean */ +#define AT_BASE_PLATFORM 24 /* string identifying real platform, may + * differ from AT_PLATFORM. */ + #define AT_EXECFN 31 /* filename of program */ + #ifdef __KERNEL__ -#define AT_VECTOR_SIZE_BASE 17 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif diff --git a/include/linux/bcd.h b/include/linux/bcd.h index c545308125b..7ac518e3c15 100644 --- a/include/linux/bcd.h +++ b/include/linux/bcd.h @@ -10,8 +10,13 @@ #ifndef _BCD_H #define _BCD_H -#define BCD2BIN(val) (((val) & 0x0f) + ((val)>>4)*10) -#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) +#include <linux/compiler.h> + +unsigned bcd2bin(unsigned char val) __attribute_const__; +unsigned char bin2bcd(unsigned val) __attribute_const__; + +#define BCD2BIN(val) bcd2bin(val) +#define BIN2BCD(val) bin2bcd(val) /* backwards compat */ #define BCD_TO_BIN(val) ((val)=BCD2BIN(val)) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index ee0ed48e834..826f6235080 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -38,7 +38,7 @@ struct linux_binprm{ misc_bang:1; struct file * file; int e_uid, e_gid; - kernel_cap_t cap_inheritable, cap_permitted; + kernel_cap_t cap_post_exec_permitted; bool cap_effective; void *security; int argc, envc; diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1abfe664c44..89781fd4885 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -110,6 +110,7 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); +extern int bitmap_scnprintf_len(unsigned int nr_bits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/blkdev.h 
b/include/linux/blkdev.h index 88d68081a0f..e61f22be4d0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -655,6 +655,7 @@ extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_plug_device(struct request_queue *); +extern void blk_plug_device_unlocked(struct request_queue *); extern int blk_remove_plug(struct request_queue *); extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_cmd_ioctl(struct file *, struct request_queue *, diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index a1d9b79078e..95837bfb525 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -28,85 +28,85 @@ extern unsigned long saved_max_pfn; * memory pages (including holes) on the node. */ typedef struct bootmem_data { - unsigned long node_boot_start; + unsigned long node_min_pfn; unsigned long node_low_pfn; void *node_bootmem_map; - unsigned long last_offset; - unsigned long last_pos; - unsigned long last_success; /* Previous allocation point. To speed - * up searching */ + unsigned long last_end_off; + unsigned long hint_idx; struct list_head list; } bootmem_data_t; +extern bootmem_data_t bootmem_node_data[]; + extern unsigned long bootmem_bootmap_pages(unsigned long); + +extern unsigned long init_bootmem_node(pg_data_t *pgdat, + unsigned long freepfn, + unsigned long startpfn, + unsigned long endpfn); extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); + +extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); +extern unsigned long free_all_bootmem(void); + +extern void free_bootmem_node(pg_data_t *pgdat, + unsigned long addr, + unsigned long size); extern void free_bootmem(unsigned long addr, unsigned long size); -extern void *__alloc_bootmem(unsigned long size, + +/* + * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, + * the architecture-specific code should honor this). + * + * If flags is 0, then the return value is always 0 (success). If + * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the + * memory already was reserved. 
+ */ +#define BOOTMEM_DEFAULT 0 +#define BOOTMEM_EXCLUSIVE (1<<0) + +extern int reserve_bootmem_node(pg_data_t *pgdat, + unsigned long physaddr, + unsigned long size, + int flags); +#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE +extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); +#endif + +extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_nopanic(unsigned long size, +extern void *__alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal); extern void *__alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal); +extern void *__alloc_bootmem_node(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); +extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, + unsigned long size, + unsigned long align, + unsigned long goal); extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_core(struct bootmem_data *bdata, - unsigned long size, - unsigned long align, - unsigned long goal, - unsigned long limit); - -/* - * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, - * the architecture-specific code should honor this) - */ -#define BOOTMEM_DEFAULT 0 -#define BOOTMEM_EXCLUSIVE (1<<0) - #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE -/* - * If flags is 0, then the return value is always 0 (success). If - * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the - * memory already was reserved. - */ -extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_nopanic(x) \ + __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low(x) \ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_nopanic(x) \ + __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem_low(x, PAGE_SIZE, 0) -#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ - -extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, - int flags); -extern unsigned long free_all_bootmem(void); -extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); -extern void *__alloc_bootmem_node(pg_data_t *pgdat, - unsigned long size, - unsigned long align, - unsigned long goal); -extern unsigned long init_bootmem_node(pg_data_t *pgdat, - unsigned long freepfn, - unsigned long startpfn, - unsigned long endpfn); -extern int reserve_bootmem_node(pg_data_t *pgdat, - unsigned long physaddr, - unsigned long size, - int flags); -extern void free_bootmem_node(pg_data_t *pgdat, - unsigned long addr, - unsigned long size); -extern void *alloc_bootmem_section(unsigned long size, - unsigned long section_nr); - -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ @@ -115,6 +115,12 @@ extern void *alloc_bootmem_section(unsigned long size, __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ +extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, + int flags); + +extern void *alloc_bootmem_section(unsigned long size, + unsigned long section_nr); + #ifdef 
CONFIG_HAVE_ARCH_ALLOC_REMAP extern void *alloc_remap(int nid, unsigned long size); #else diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 82aa36c53ea..eadaab44015 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -115,7 +115,6 @@ BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) -TAS_BUFFER_FNS(Lock, locked) BUFFER_FNS(Req, req) TAS_BUFFER_FNS(Req, req) BUFFER_FNS(Mapped, mapped) @@ -205,6 +204,8 @@ void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int block_read_full_page(struct page*, get_block_t*); +int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, + unsigned long from); int block_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); @@ -319,10 +320,15 @@ static inline void wait_on_buffer(struct buffer_head *bh) __wait_on_buffer(bh); } +static inline int trylock_buffer(struct buffer_head *bh) +{ + return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); +} + static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (test_set_buffer_locked(bh)) + if (!trylock_buffer(bh)) __lock_buffer(bh); } diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h new file mode 100644 index 00000000000..29f002d73d9 --- /dev/null +++ b/include/linux/byteorder.h @@ -0,0 +1,372 @@ +#ifndef _LINUX_BYTEORDER_H +#define _LINUX_BYTEORDER_H + +#include <linux/types.h> +#include <linux/swab.h> + +#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define one endianness +#endif + +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) +# error Fix asm/byteorder.h to define arch endianness +#endif + +#ifdef __LITTLE_ENDIAN +# undef __LITTLE_ENDIAN +# define __LITTLE_ENDIAN 1234 +#endif + +#ifdef __BIG_ENDIAN +# undef __BIG_ENDIAN +# define __BIG_ENDIAN 4321 +#endif + +#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) +# define __LITTLE_ENDIAN_BITFIELD +#endif + +#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) +# define __BIG_ENDIAN_BITFIELD +#endif + +#ifdef __LITTLE_ENDIAN +# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) + +# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) +# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) +# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) +#endif + +#ifdef __BIG_ENDIAN +# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) +# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) +# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) + +# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +# define __le64_to_cpu(x) __swab64((__force 
__u64)(__le64)(x)) +# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) +# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) +# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) +#endif + +/* + * These helpers could be phased out over time as the base version + * handles constant folding. + */ +#define __constant_htonl(x) __cpu_to_be32(x) +#define __constant_ntohl(x) __be32_to_cpu(x) +#define __constant_htons(x) __cpu_to_be16(x) +#define __constant_ntohs(x) __be16_to_cpu(x) + +#define __constant_le16_to_cpu(x) __le16_to_cpu(x) +#define __constant_le32_to_cpu(x) __le32_to_cpu(x) +#define __constant_le64_to_cpu(x) __le64_to_cpu(x) +#define __constant_be16_to_cpu(x) __be16_to_cpu(x) +#define __constant_be32_to_cpu(x) __be32_to_cpu(x) +#define __constant_be64_to_cpu(x) __be64_to_cpu(x) + +#define __constant_cpu_to_le16(x) __cpu_to_le16(x) +#define __constant_cpu_to_le32(x) __cpu_to_le32(x) +#define __constant_cpu_to_le64(x) __cpu_to_le64(x) +#define __constant_cpu_to_be16(x) __cpu_to_be16(x) +#define __constant_cpu_to_be32(x) __cpu_to_be32(x) +#define __constant_cpu_to_be64(x) __cpu_to_be64(x) + +static inline void __le16_to_cpus(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_le16s(__u16 *p) +{ +#ifdef __BIG_ENDIAN + __swab16s(p); +#endif +} + +static inline void __le32_to_cpus(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_le32s(__u32 *p) +{ +#ifdef __BIG_ENDIAN + __swab32s(p); +#endif +} + +static inline void __le64_to_cpus(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_le64s(__u64 *p) +{ +#ifdef __BIG_ENDIAN + __swab64s(p); +#endif +} + +static inline void __be16_to_cpus(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __cpu_to_be16s(__u16 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab16s(p); +#endif +} + +static inline void __be32_to_cpus(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __cpu_to_be32s(__u32 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab32s(p); +#endif +} + +static inline void __be64_to_cpus(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline void __cpu_to_be64s(__u64 *p) +{ +#ifdef __LITTLE_ENDIAN + __swab64s(p); +#endif +} + +static inline __u16 __le16_to_cpup(const __le16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __le32_to_cpup(const __le32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __le64_to_cpup(const __le64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le16)*p; +#else + return (__force __le16)__swab16p(p); +#endif +} + +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le32)*p; +#else + return (__force __le32)__swab32p(p); +#endif +} + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ +#ifdef __LITTLE_ENDIAN + return (__force __le64)*p; +#else + return (__force __le64)__swab64p(p); +#endif +} + +static inline __u16 __be16_to_cpup(const __be16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u16)*p; +#else + return __swab16p((__force __u16 *)p); +#endif +} + +static inline __u32 __be32_to_cpup(const __be32 *p) +{ +#ifdef 
__BIG_ENDIAN + return (__force __u32)*p; +#else + return __swab32p((__force __u32 *)p); +#endif +} + +static inline __u64 __be64_to_cpup(const __be64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __u64)*p; +#else + return __swab64p((__force __u64 *)p); +#endif +} + +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be16)*p; +#else + return (__force __be16)__swab16p(p); +#endif +} + +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)*p; +#else + return (__force __be32)__swab32p(p); +#endif +} + +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)*p; +#else + return (__force __be64)__swab64p(p); +#endif +} + +#ifdef __KERNEL__ + +# define le16_to_cpu __le16_to_cpu +# define le32_to_cpu __le32_to_cpu +# define le64_to_cpu __le64_to_cpu +# define be16_to_cpu __be16_to_cpu +# define be32_to_cpu __be32_to_cpu +# define be64_to_cpu __be64_to_cpu +# define cpu_to_le16 __cpu_to_le16 +# define cpu_to_le32 __cpu_to_le32 +# define cpu_to_le64 __cpu_to_le64 +# define cpu_to_be16 __cpu_to_be16 +# define cpu_to_be32 __cpu_to_be32 +# define cpu_to_be64 __cpu_to_be64 + +# define le16_to_cpup __le16_to_cpup +# define le32_to_cpup __le32_to_cpup +# define le64_to_cpup __le64_to_cpup +# define be16_to_cpup __be16_to_cpup +# define be32_to_cpup __be32_to_cpup +# define be64_to_cpup __be64_to_cpup +# define cpu_to_le16p __cpu_to_le16p +# define cpu_to_le32p __cpu_to_le32p +# define cpu_to_le64p __cpu_to_le64p +# define cpu_to_be16p __cpu_to_be16p +# define cpu_to_be32p __cpu_to_be32p +# define cpu_to_be64p __cpu_to_be64p + +# define le16_to_cpus __le16_to_cpus +# define le32_to_cpus __le32_to_cpus +# define le64_to_cpus __le64_to_cpus +# define be16_to_cpus __be16_to_cpus +# define be32_to_cpus __be32_to_cpus +# define be64_to_cpus __be64_to_cpus +# define cpu_to_le16s __cpu_to_le16s +# define cpu_to_le32s __cpu_to_le32s +# define cpu_to_le64s __cpu_to_le64s +# define cpu_to_be16s __cpu_to_be16s +# define cpu_to_be32s __cpu_to_be32s +# define cpu_to_be64s __cpu_to_be64s + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ +# undef ntohl +# undef ntohs +# undef htonl +# undef htons + +# define ___htonl(x) __cpu_to_be32(x) +# define ___htons(x) __cpu_to_be16(x) +# define ___ntohl(x) __be32_to_cpu(x) +# define ___ntohs(x) __be16_to_cpu(x) + +# define htonl(x) ___htonl(x) +# define ntohl(x) ___ntohl(x) +# define htons(x) ___htons(x) +# define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpup(var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpup(var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpup(var) + val); +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpup(var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpup(var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpup(var) + val); +} + +#endif /* __KERNEL__ */ +#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h index 961ed4b48d8..44f95b92393 100644 --- a/include/linux/byteorder/big_endian.h +++ b/include/linux/byteorder/big_endian.h @@ -94,12 +94,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p) #define __le32_to_cpus(x) __swab32s((x)) #define __cpu_to_le16s(x) __swab16s((x)) #define __le16_to_cpus(x) __swab16s((x)) -#define __cpu_to_be64s(x) do {} while (0) -#define __be64_to_cpus(x) do {} while (0) -#define __cpu_to_be32s(x) do {} while (0) -#define __be32_to_cpus(x) do {} while (0) -#define __cpu_to_be16s(x) do {} while (0) -#define __be16_to_cpus(x) do {} while (0) +#define __cpu_to_be64s(x) do { (void)(x); } while (0) +#define __be64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be32s(x) do { (void)(x); } while (0) +#define __be32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be16s(x) do { (void)(x); } while (0) +#define __be16_to_cpus(x) do { (void)(x); } while (0) #ifdef __KERNEL__ #include <linux/byteorder/generic.h> diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h index 05dc7c35b3b..4cc170a3176 100644 --- a/include/linux/byteorder/little_endian.h +++ b/include/linux/byteorder/little_endian.h @@ -88,12 +88,12 @@ static inline __u16 __be16_to_cpup(const __be16 *p) { return __swab16p((__u16 *)p); } -#define __cpu_to_le64s(x) do {} while (0) -#define __le64_to_cpus(x) do {} while (0) -#define __cpu_to_le32s(x) do {} while (0) -#define __le32_to_cpus(x) do {} while (0) -#define __cpu_to_le16s(x) do {} while (0) -#define __le16_to_cpus(x) do {} while (0) +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) #define __cpu_to_be64s(x) __swab64s((x)) #define __be64_to_cpus(x) __swab64s((x)) #define __cpu_to_be32s(x) __swab32s((x)) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e155aa78d85..c98dd7cb707 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -21,11 +21,13 @@ struct cgroupfs_root; struct cgroup_subsys; struct inode; +struct cgroup; extern int cgroup_init_early(void); extern int cgroup_init(void); extern void cgroup_init_smp(void); extern void cgroup_lock(void); +extern 
bool cgroup_lock_live_group(struct cgroup *cgrp); extern void cgroup_unlock(void); extern void cgroup_fork(struct task_struct *p); extern void cgroup_fork_callbacks(struct task_struct *p); @@ -205,50 +207,64 @@ struct cftype { * subsystem, followed by a period */ char name[MAX_CFTYPE_NAME]; int private; - int (*open) (struct inode *inode, struct file *file); - ssize_t (*read) (struct cgroup *cgrp, struct cftype *cft, - struct file *file, - char __user *buf, size_t nbytes, loff_t *ppos); + + /* + * If non-zero, defines the maximum length of string that can + * be passed to write_string; defaults to 64 + */ + size_t max_write_len; + + int (*open)(struct inode *inode, struct file *file); + ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + char __user *buf, size_t nbytes, loff_t *ppos); /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() */ - u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft); + u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft); /* * read_s64() is a signed version of read_u64() */ - s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft); + s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft); /* * read_map() is used for defining a map of key/value * pairs. It should call cb->fill(cb, key, value) for each * entry. The key/value pairs (and their ordering) should not * change between reboots. */ - int (*read_map) (struct cgroup *cont, struct cftype *cft, - struct cgroup_map_cb *cb); + int (*read_map)(struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb); /* * read_seq_string() is used for outputting a simple sequence * using seqfile. */ - int (*read_seq_string) (struct cgroup *cont, struct cftype *cft, - struct seq_file *m); + int (*read_seq_string)(struct cgroup *cont, struct cftype *cft, + struct seq_file *m); - ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, - struct file *file, - const char __user *buf, size_t nbytes, loff_t *ppos); + ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + const char __user *buf, size_t nbytes, loff_t *ppos); /* * write_u64() is a shortcut for the common case of accepting * a single integer (as parsed by simple_strtoull) from * userspace. Use in place of write(); return 0 or error. */ - int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val); + int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val); /* * write_s64() is a signed version of write_u64() */ - int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val); + int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val); /* + * write_string() is passed a nul-terminated kernelspace + * buffer of maximum length determined by max_write_len. + * Returns 0 or -ve error code. + */ + int (*write_string)(struct cgroup *cgrp, struct cftype *cft, + const char *buffer); + /* * trigger() callback can be used to get some kick from the * userspace, when the actual string written is not important * at all. 
The private field can be used to determine the @@ -256,7 +272,7 @@ struct cftype { */ int (*trigger)(struct cgroup *cgrp, unsigned int event); - int (*release) (struct inode *inode, struct file *file); + int (*release)(struct inode *inode, struct file *file); }; struct cgroup_scanner { @@ -348,7 +364,8 @@ static inline struct cgroup* task_cgroup(struct task_struct *task, return task_subsys_state(task, subsys_id)->cgroup; } -int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss); +int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss, + char *nodename); /* A cgroup_iter should be treated as an opaque object */ struct cgroup_iter { diff --git a/include/linux/coda.h b/include/linux/coda.h index b5cf0780c51..96c87693800 100644 --- a/include/linux/coda.h +++ b/include/linux/coda.h @@ -199,28 +199,6 @@ typedef u_int32_t vuid_t; typedef u_int32_t vgid_t; #endif /*_VUID_T_ */ -#ifdef CONFIG_CODA_FS_OLD_API -struct CodaFid { - u_int32_t opaque[3]; -}; - -static __inline__ ino_t coda_f2i(struct CodaFid *fid) -{ - if ( ! fid ) - return 0; - if (fid->opaque[1] == 0xfffffffe || fid->opaque[1] == 0xffffffff) - return ((fid->opaque[0] << 20) | (fid->opaque[2] & 0xfffff)); - else - return (fid->opaque[2] + (fid->opaque[1]<<10) + (fid->opaque[0]<<20)); -} - -struct coda_cred { - vuid_t cr_uid, cr_euid, cr_suid, cr_fsuid; /* Real, efftve, set, fs uid*/ - vgid_t cr_groupid, cr_egid, cr_sgid, cr_fsgid; /* same for groups */ -}; - -#else /* not defined(CONFIG_CODA_FS_OLD_API) */ - struct CodaFid { u_int32_t opaque[4]; }; @@ -228,8 +206,6 @@ struct CodaFid { #define coda_f2i(fid)\ (fid ? (fid->opaque[3] ^ (fid->opaque[2]<<10) ^ (fid->opaque[1]<<20) ^ fid->opaque[0]) : 0) -#endif - #ifndef _VENUS_VATTR_T_ #define _VENUS_VATTR_T_ /* @@ -313,15 +289,7 @@ struct coda_statfs { #define CIOC_KERNEL_VERSION _IOWR('c', 10, size_t) -#if 0 -#define CODA_KERNEL_VERSION 0 /* don't care about kernel version number */ -#define CODA_KERNEL_VERSION 1 /* The old venus 4.6 compatible interface */ -#endif -#ifdef CONFIG_CODA_FS_OLD_API -#define CODA_KERNEL_VERSION 2 /* venus_lookup got an extra parameter */ -#else #define CODA_KERNEL_VERSION 3 /* 128-bit file identifiers */ -#endif /* * Venus <-> Coda RPC arguments @@ -329,16 +297,9 @@ struct coda_statfs { struct coda_in_hdr { u_int32_t opcode; u_int32_t unique; /* Keep multiple outstanding msgs distinct */ -#ifdef CONFIG_CODA_FS_OLD_API - u_int16_t pid; /* Common to all */ - u_int16_t pgid; /* Common to all */ - u_int16_t sid; /* Common to all */ - struct coda_cred cred; /* Common to all */ -#else pid_t pid; pid_t pgid; vuid_t uid; -#endif }; /* Really important that opcode and unique are 1st two fields! 
*/ @@ -613,11 +574,7 @@ struct coda_vget_out { /* CODA_PURGEUSER is a venus->kernel call */ struct coda_purgeuser_out { struct coda_out_hdr oh; -#ifdef CONFIG_CODA_FS_OLD_API - struct coda_cred cred; -#else vuid_t uid; -#endif }; /* coda_zapfile: */ diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index 31b75311e2c..dcc228aa335 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -37,7 +37,7 @@ extern const struct file_operations coda_ioctl_operations; /* operations shared over more than one file */ int coda_open(struct inode *i, struct file *f); int coda_release(struct inode *i, struct file *f); -int coda_permission(struct inode *inode, int mask, struct nameidata *nd); +int coda_permission(struct inode *inode, int mask); int coda_revalidate_inode(struct dentry *); int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); int coda_setattr(struct dentry *, struct iattr *); diff --git a/include/linux/completion.h b/include/linux/completion.h index d2961b66d53..57faa60de9b 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -55,4 +55,49 @@ extern void complete_all(struct completion *); #define INIT_COMPLETION(x) ((x).done = 0) + +/** + * try_wait_for_completion - try to decrement a completion without blocking + * @x: completion structure + * + * Returns: 0 if a decrement cannot be done without blocking + * 1 if a decrement succeeded. + * + * If a completion is being used as a counting completion, + * attempt to decrement the counter without blocking. This + * enables us to avoid waiting if the resource the completion + * is protecting is not available. + */ +static inline bool try_wait_for_completion(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + else + x->done--; + spin_unlock_irq(&x->wait.lock); + return ret; +} + +/** + * completion_done - Test to see if a completion has any waiters + * @x: completion structure + * + * Returns: 0 if there are waiters (wait_for_completion() in progress) + * 1 if there are no waiters. + * + */ +static inline bool completion_done(struct completion *x) +{ + int ret = 1; + + spin_lock_irq(&x->wait.lock); + if (!x->done) + ret = 0; + spin_unlock_irq(&x->wait.lock); + return ret; +} + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index d62c19ff041..7f627775c94 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -40,6 +40,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/err.h> #include <asm/atomic.h> @@ -129,8 +130,25 @@ struct configfs_attribute { /* * Users often need to create attribute structures for their configurable * attributes, containing a configfs_attribute member and function pointers - * for the show() and store() operations on that attribute. They can use - * this macro (similar to sysfs' __ATTR) to make defining attributes easier. + * for the show() and store() operations on that attribute. If they don't + * need anything else on the extended attribute structure, they can use + * this macro to define it The argument _item is the name of the + * config_item structure. + */ +#define CONFIGFS_ATTR_STRUCT(_item) \ +struct _item##_attribute { \ + struct configfs_attribute attr; \ + ssize_t (*show)(struct _item *, char *); \ + ssize_t (*store)(struct _item *, const char *, size_t); \ +} + +/* + * With the extended attribute structure, users can use this macro + * (similar to sysfs' __ATTR) to make defining attributes easier. 
+ * An example: + * #define MYITEM_ATTR(_name, _mode, _show, _store) \ + * struct myitem_attribute childless_attr_##_name = \ + * __CONFIGFS_ATTR(_name, _mode, _show, _store) */ #define __CONFIGFS_ATTR(_name, _mode, _show, _store) \ { \ @@ -142,6 +160,52 @@ struct configfs_attribute { .show = _show, \ .store = _store, \ } +/* Here is a readonly version, only requiring a show() operation */ +#define __CONFIGFS_ATTR_RO(_name, _show) \ +{ \ + .attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = 0444, \ + .ca_owner = THIS_MODULE, \ + }, \ + .show = _show, \ +} + +/* + * With these extended attributes, the simple show_attribute() and + * store_attribute() operations need to call the show() and store() of the + * attributes. This is a common pattern, so we provide a macro to define + * them. The argument _item is the name of the config_item structure. + * This macro expects the attributes to be named "struct <name>_attribute" + * and the function to_<name>() to exist; + */ +#define CONFIGFS_ATTR_OPS(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} \ +static ssize_t _item##_attr_store(struct config_item *item, \ + struct configfs_attribute *attr, \ + const char *page, size_t count) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = -EINVAL; \ + \ + if (_item##_attr->store) \ + ret = _item##_attr->store(_item, page, count); \ + return ret; \ +} /* * If allow_link() exists, the item can symlink(2) out to other diff --git a/include/linux/connector.h b/include/linux/connector.h index 96a89d3d672..5c7f9468f75 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -38,8 +38,9 @@ #define CN_W1_VAL 0x1 #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 +#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ -#define CN_NETLINK_USERS 5 +#define CN_NETLINK_USERS 6 /* * Maximum connector's message size. diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h index e2bf7e5db39..c4811da1338 100644 --- a/include/linux/consolemap.h +++ b/include/linux/consolemap.h @@ -3,6 +3,9 @@ * * Interface between console.c, selection.c and consolemap.c */ +#ifndef __LINUX_CONSOLEMAP_H__ +#define __LINUX_CONSOLEMAP_H__ + #define LAT1_MAP 0 #define GRAF_MAP 1 #define IBMPC_MAP 2 @@ -10,6 +13,7 @@ #include <linux/types.h> +#ifdef CONFIG_CONSOLE_TRANSLATIONS struct vc_data; extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode); @@ -18,3 +22,13 @@ extern int conv_uni_to_pc(struct vc_data *conp, long ucs); extern u32 conv_8bit_to_uni(unsigned char c); extern int conv_uni_to_8bit(u32 uni); void console_map_init(void); +#else +#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) +#define set_translate(m, vc) ((unsigned short *)NULL) +#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? 
-1: ucs)) +#define conv_8bit_to_uni(c) ((uint32_t)(c)) +#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) +#define console_map_init(c) do { ; } while (0) +#endif /* CONFIG_CONSOLE_TRANSLATIONS */ + +#endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 7464ba3b433..d7faf880849 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -69,10 +69,11 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) #endif int cpu_up(unsigned int cpu); - extern void cpu_hotplug_init(void); +extern void cpu_maps_update_begin(void); +extern void cpu_maps_update_done(void); -#else +#else /* CONFIG_SMP */ static inline int register_cpu_notifier(struct notifier_block *nb) { @@ -87,10 +88,16 @@ static inline void cpu_hotplug_init(void) { } +static inline void cpu_maps_update_begin(void) +{ +} + +static inline void cpu_maps_update_done(void) +{ +} + #endif /* CONFIG_SMP */ extern struct sysdev_class cpu_sysdev_class; -extern void cpu_maps_update_begin(void); -extern void cpu_maps_update_done(void); #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 2270ca5ec63..6fd5668aa57 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -106,6 +106,7 @@ struct cpufreq_policy { #define CPUFREQ_ADJUST (0) #define CPUFREQ_INCOMPATIBLE (1) #define CPUFREQ_NOTIFY (2) +#define CPUFREQ_START (3) #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index c24875bd9c5..d3219d73f8e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -17,6 +17,20 @@ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. * + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * Note: The alternate operations with the suffix "_nr" are used + * to limit the range of the loop to nr_cpu_ids instead of + * NR_CPUS when NR_CPUS > 64 for performance reasons. + * If NR_CPUS is <= 64 then most assembler bitmask + * operators execute faster with a constant range, so + * the operator will continue to use NR_CPUS. + * + * Another consideration is that nr_cpu_ids is initialized + * to NR_CPUS and isn't lowered until the possible cpus are + * discovered (including any disabled cpus). So early uses + * will span the entire range of NR_CPUS. + * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . + * * The available cpumask operations are: * * void cpu_set(cpu, mask) turn on bit 'cpu' in mask @@ -38,18 +52,52 @@ * int cpus_empty(mask) Is mask empty (no bits sets)? * int cpus_full(mask) Is mask full (all bits sets)? 
* int cpus_weight(mask) Hamming weigh - number of set bits + * int cpus_weight_nr(mask) Same using nr_cpu_ids instead of NR_CPUS * * void cpus_shift_right(dst, src, n) Shift right * void cpus_shift_left(dst, src, n) Shift left * * int first_cpu(mask) Number lowest set bit, or NR_CPUS * int next_cpu(cpu, mask) Next cpu past 'cpu', or NR_CPUS + * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids * * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set + * (can be used as an lvalue) * CPU_MASK_ALL Initializer - all bits set * CPU_MASK_NONE Initializer - no bits set * unsigned long *cpus_addr(mask) Array of unsigned long's in mask * + * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t + * variables, and CPUMASK_PTR provides pointers to each field. + * + * The structure should be defined something like this: + * struct my_cpumasks { + * cpumask_t mask1; + * cpumask_t mask2; + * }; + * + * Usage is then: + * CPUMASK_ALLOC(my_cpumasks); + * CPUMASK_PTR(mask1, my_cpumasks); + * CPUMASK_PTR(mask2, my_cpumasks); + * + * --- DO NOT reference cpumask_t pointers until this check --- + * if (my_cpumasks == NULL) + * "kmalloc failed"... + * + * References are now pointers to the cpumask_t variables (*mask1, ...) + * + *if NR_CPUS > BITS_PER_LONG + * CPUMASK_ALLOC(m) Declares and allocates struct m *m = + * kmalloc(sizeof(*m), GFP_KERNEL) + * CPUMASK_FREE(m) Macro for kfree(m) + *else + * CPUMASK_ALLOC(m) Declares struct m _m, *m = &_m + * CPUMASK_FREE(m) Nop + *endif + * CPUMASK_PTR(v, m) Declares cpumask_t *v = &(m->v) + * ------------------------------------------------------------------------ + * * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing * int cpumask_parse_user(ubuf, ulen, mask) Parse ascii string as cpumask * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing @@ -59,7 +107,8 @@ * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz * - * for_each_cpu_mask(cpu, mask) for-loop cpu over mask + * for_each_cpu_mask(cpu, mask) for-loop cpu over mask using NR_CPUS + * for_each_cpu_mask_nr(cpu, mask) for-loop cpu over mask using nr_cpu_ids * * int num_online_cpus() Number of online CPUs * int num_possible_cpus() Number of all possible CPUs @@ -216,33 +265,30 @@ static inline void __cpus_shift_left(cpumask_t *dstp, bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } -#ifdef CONFIG_SMP -int __first_cpu(const cpumask_t *srcp); -#define first_cpu(src) __first_cpu(&(src)) -int __next_cpu(int n, const cpumask_t *srcp); -#define next_cpu(n, src) __next_cpu((n), &(src)) -#else -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#endif +/* + * Special-case data structure for "single bit set only" constant CPU masks. + * + * We pre-generate all the 64 (or 32) possible bit positions, with enough + * padding to the left and the right, and return the constant pointer + * appropriately offset. 
+ */ +extern const unsigned long + cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; + +static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +{ + const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; + p -= cpu / BITS_PER_LONG; + return (const cpumask_t *)p; +} -#ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP -extern cpumask_t *cpumask_of_cpu_map; -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu]) +/* + * In cases where we take the address of the cpumask immediately, + * gcc optimizes it out (it's a constant) and there's no huge stack + * variable created: + */ +#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) -#else -#define cpumask_of_cpu(cpu) \ -(*({ \ - typeof(_unused_cpumask_arg_) m; \ - if (sizeof(m) == sizeof(unsigned long)) { \ - m.bits[0] = 1UL<<(cpu); \ - } else { \ - cpus_clear(m); \ - cpu_set((cpu), m); \ - } \ - &m; \ -})) -#endif #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) @@ -281,6 +327,15 @@ extern cpumask_t cpu_mask_all; #define cpus_addr(src) ((src).bits) +#if NR_CPUS > BITS_PER_LONG +#define CPUMASK_ALLOC(m) struct m *m = kmalloc(sizeof(*m), GFP_KERNEL) +#define CPUMASK_FREE(m) kfree(m) +#else +#define CPUMASK_ALLOC(m) struct m _m, *m = &_m +#define CPUMASK_FREE(m) +#endif +#define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) + #define cpumask_scnprintf(buf, len, src) \ __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) static inline int __cpumask_scnprintf(char *buf, int len, @@ -343,29 +398,59 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, bitmap_fold(dstp->bits, origp->bits, sz, nbits); } -#if NR_CPUS > 1 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = first_cpu(mask); \ - (cpu) < NR_CPUS; \ - (cpu) = next_cpu((cpu), (mask))) -#else /* NR_CPUS == 1 */ -#define for_each_cpu_mask(cpu, mask) \ +#if NR_CPUS == 1 + +#define nr_cpu_ids 1 +#define first_cpu(src) ({ (void)(src); 0; }) +#define next_cpu(n, src) ({ (void)(src); 1; }) +#define any_online_cpu(mask) 0 +#define for_each_cpu_mask(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#endif /* NR_CPUS */ + +#else /* NR_CPUS > 1 */ + +extern int nr_cpu_ids; +int __first_cpu(const cpumask_t *srcp); +int __next_cpu(int n, const cpumask_t *srcp); +int __any_online_cpu(const cpumask_t *mask); + +#define first_cpu(src) __first_cpu(&(src)) +#define next_cpu(n, src) __next_cpu((n), &(src)) +#define any_online_cpu(mask) __any_online_cpu(&(mask)) +#define for_each_cpu_mask(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu((cpu), (mask)), \ + (cpu) < NR_CPUS; ) +#endif + +#if NR_CPUS <= 64 #define next_cpu_nr(n, src) next_cpu(n, src) #define cpus_weight_nr(cpumask) cpus_weight(cpumask) #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) +#else /* NR_CPUS > 64 */ + +int __next_cpu_nr(int n, const cpumask_t *srcp); +#define next_cpu_nr(n, src) __next_cpu_nr((n), &(src)) +#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids) +#define for_each_cpu_mask_nr(cpu, mask) \ + for ((cpu) = -1; \ + (cpu) = next_cpu_nr((cpu), (mask)), \ + (cpu) < nr_cpu_ids; ) + +#endif /* NR_CPUS > 64 */ + /* * The following particular system cpumasks and operations manage - * possible, present and online cpus. Each of them is a fixed size + * possible, present, active and online cpus. Each of them is a fixed size * bitmap of size NR_CPUS. 
* * #ifdef CONFIG_HOTPLUG_CPU * cpu_possible_map - has bit 'cpu' set iff cpu is populatable * cpu_present_map - has bit 'cpu' set iff cpu is populated * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_map - has bit 'cpu' set iff cpu available to migration * #else * cpu_possible_map - has bit 'cpu' set iff cpu is populated * cpu_present_map - copy of cpu_possible_map @@ -416,14 +501,16 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_present_map; +extern cpumask_t cpu_active_map; #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight(cpu_online_map) -#define num_possible_cpus() cpus_weight(cpu_possible_map) -#define num_present_cpus() cpus_weight(cpu_present_map) +#define num_online_cpus() cpus_weight_nr(cpu_online_map) +#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) +#define num_present_cpus() cpus_weight_nr(cpu_present_map) #define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) #define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) #define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) +#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) #else #define num_online_cpus() 1 #define num_possible_cpus() 1 @@ -431,21 +518,13 @@ extern cpumask_t cpu_present_map; #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) +#define cpu_active(cpu) ((cpu) == 0) #endif #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#ifdef CONFIG_SMP -extern int nr_cpu_ids; -#define any_online_cpu(mask) __any_online_cpu(&(mask)) -int __any_online_cpu(const cpumask_t *mask); -#else -#define nr_cpu_ids 1 -#define any_online_cpu(mask) 0 -#endif - -#define for_each_possible_cpu(cpu) for_each_cpu_mask((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) +#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) +#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) +#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 038578362b4..e8f450c499b 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -78,6 +78,8 @@ extern void cpuset_track_online_nodes(void); extern int current_cpuset_is_being_rebound(void); +extern void rebuild_sched_domains(void); + #else /* !CONFIG_CPUSETS */ static inline int cpuset_init_early(void) { return 0; } @@ -156,6 +158,11 @@ static inline int current_cpuset_is_being_rebound(void) return 0; } +static inline void rebuild_sched_domains(void) +{ + partition_sched_domains(0, NULL, NULL); +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 22c7ac5cd80..025e4f57510 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -8,7 +8,13 @@ #include <linux/proc_fs.h> #define ELFCORE_ADDR_MAX (-1ULL) + +#ifdef CONFIG_PROC_VMCORE extern unsigned long long elfcorehdr_addr; +#else +static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; +#endif + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, unsigned long, int); extern const struct file_operations proc_vmcore_operations; @@ -22,5 +28,13 @@ extern struct proc_dir_entry *proc_vmcore; #define 
vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) +static inline int is_kdump_kernel(void) +{ + return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; +} +#else /* !CONFIG_CRASH_DUMP */ +static inline int is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ + +extern unsigned long saved_max_pfn; #endif /* LINUX_CRASHDUMP_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h new file mode 100644 index 00000000000..b69222cc1fd --- /dev/null +++ b/include/linux/cred.h @@ -0,0 +1,50 @@ +/* Credentials management + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_CRED_H +#define _LINUX_CRED_H + +#define get_current_user() (get_uid(current->user)) + +#define task_uid(task) ((task)->uid) +#define task_gid(task) ((task)->gid) +#define task_euid(task) ((task)->euid) +#define task_egid(task) ((task)->egid) + +#define current_uid() (current->uid) +#define current_gid() (current->gid) +#define current_euid() (current->euid) +#define current_egid() (current->egid) +#define current_suid() (current->suid) +#define current_sgid() (current->sgid) +#define current_fsuid() (current->fsuid) +#define current_fsgid() (current->fsgid) +#define current_cap() (current->cap_effective) + +#define current_uid_gid(_uid, _gid) \ +do { \ + *(_uid) = current->uid; \ + *(_gid) = current->gid; \ +} while(0) + +#define current_euid_egid(_uid, _gid) \ +do { \ + *(_uid) = current->euid; \ + *(_gid) = current->egid; \ +} while(0) + +#define current_fsuid_fsgid(_uid, _gid) \ +do { \ + *(_uid) = current->fsuid; \ + *(_gid) = current->fsgid; \ +} while(0) + +#endif /* _LINUX_CRED_H */ diff --git a/include/linux/dca.h b/include/linux/dca.h index af61cd1f37e..b00a753eda5 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -10,6 +10,7 @@ void dca_unregister_notify(struct notifier_block *nb); #define DCA_PROVIDER_REMOVE 0x0002 struct dca_provider { + struct list_head node; struct dca_ops *ops; struct device *cd; int id; @@ -18,7 +19,9 @@ struct dca_provider { struct dca_ops { int (*add_requester) (struct dca_provider *, struct device *); int (*remove_requester) (struct dca_provider *, struct device *); - u8 (*get_tag) (struct dca_provider *, int cpu); + u8 (*get_tag) (struct dca_provider *, struct device *, + int cpu); + int (*dev_managed) (struct dca_provider *, struct device *); }; struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); @@ -32,9 +35,11 @@ static inline void *dca_priv(struct dca_provider *dca) } /* Requester API */ +#define DCA_GET_TAG_TWO_ARGS int dca_add_requester(struct device *dev); int dca_remove_requester(struct device *dev); u8 dca_get_tag(int cpu); +u8 dca3_get_tag(struct device *dev, int cpu); /* internal stuff */ int __init dca_sysfs_init(void); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98202c672fd..07aa198f19e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -230,6 +230,7 @@ extern void d_delete(struct dentry *); extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct inode *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); +extern struct dentry * d_add_ci(struct 
inode *, struct dentry *, struct qstr *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index ab94bc08355..f352f06fa06 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -39,6 +39,8 @@ extern void __delayacct_blkio_start(void); extern void __delayacct_blkio_end(void); extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *); extern __u64 __delayacct_blkio_ticks(struct task_struct *); +extern void __delayacct_freepages_start(void); +extern void __delayacct_freepages_end(void); static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { @@ -107,6 +109,18 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) return 0; } +static inline void delayacct_freepages_start(void) +{ + if (current->delays) + __delayacct_freepages_start(); +} + +static inline void delayacct_freepages_end(void) +{ + if (current->delays) + __delayacct_freepages_end(); +} + #else static inline void delayacct_set_flag(int flag) {} @@ -129,6 +143,11 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk) { return 0; } static inline int delayacct_is_task_waiting_on_io(struct task_struct *p) { return 0; } +static inline void delayacct_freepages_start(void) +{} +static inline void delayacct_freepages_end(void) +{} + #endif /* CONFIG_TASK_DELAY_ACCT */ #endif diff --git a/include/linux/dirent.h b/include/linux/dirent.h index 5d6023b8780..f072fb8d10a 100644 --- a/include/linux/dirent.h +++ b/include/linux/dirent.h @@ -1,23 +1,6 @@ #ifndef _LINUX_DIRENT_H #define _LINUX_DIRENT_H -struct dirent { - long d_ino; - __kernel_off_t d_off; - unsigned short d_reclen; - char d_name[256]; /* We must not include limits.h! */ -}; - -struct dirent64 { - __u64 d_ino; - __s64 d_off; - unsigned short d_reclen; - unsigned char d_type; - char d_name[256]; -}; - -#ifdef __KERNEL__ - struct linux_dirent64 { u64 d_ino; s64 d_off; @@ -26,7 +9,4 @@ struct linux_dirent64 { char d_name[0]; }; -#endif /* __KERNEL__ */ - - #endif diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446b642..c30879cf93b 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -27,6 +27,7 @@ struct dm9000_plat_data { unsigned int flags; + unsigned char dev_addr[6]; /* allow replacement IO routines */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d08a5c5eb92..adb0b084eb5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -89,10 +89,23 @@ enum dma_transaction_type { DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, + DMA_SLAVE, }; /* last transaction type for creation of the capabilities mask */ -#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1) +#define DMA_TX_TYPE_END (DMA_SLAVE + 1) + +/** + * enum dma_slave_width - DMA slave register access width. + * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses + * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses + * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses + */ +enum dma_slave_width { + DMA_SLAVE_WIDTH_8BIT, + DMA_SLAVE_WIDTH_16BIT, + DMA_SLAVE_WIDTH_32BIT, +}; /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, @@ -102,10 +115,14 @@ enum dma_transaction_type { * @DMA_CTRL_ACK - the descriptor cannot be reused until the client * acknowledges receipt, i.e. 
has has a chance to establish any * dependency chains + * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) + * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), + DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), + DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; /** @@ -115,6 +132,32 @@ enum dma_ctrl_flags { typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** + * struct dma_slave - Information about a DMA slave + * @dev: device acting as DMA slave + * @dma_dev: required DMA master device. If non-NULL, the client can not be + * bound to other masters than this. + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + * + * If dma_dev is non-NULL, the client can not be bound to other DMA + * masters than the one corresponding to this device. The DMA master + * driver may use this to determine if there is controller-specific + * data wrapped around this struct. Drivers of platform code that sets + * the dma_dev field must therefore make sure to use an appropriate + * controller-specific dma slave structure wrapping this struct. + */ +struct dma_slave { + struct device *dev; + struct device *dma_dev; + dma_addr_t tx_reg; + dma_addr_t rx_reg; + enum dma_slave_width reg_width; +}; + +/** * struct dma_chan_percpu - the per-CPU part of struct dma_chan * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter @@ -139,6 +182,7 @@ struct dma_chan_percpu { * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu + * @client-count: how many clients are using this channel */ struct dma_chan { struct dma_device *device; @@ -154,6 +198,7 @@ struct dma_chan { struct list_head device_node; struct dma_chan_percpu *local; + int client_count; }; #define to_dma_chan(p) container_of(p, struct dma_chan, dev) @@ -202,11 +247,14 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client, * @event_callback: func ptr to call when something happens * @cap_mask: only return channels that satisfy the requested capabilities * a value of zero corresponds to any capability + * @slave: data for preparing slave transfer. Must be non-NULL iff the + * DMA_SLAVE capability is requested. 
* @global_node: list_head for global dma_client_list */ struct dma_client { dma_event_callback event_callback; dma_cap_mask_t cap_mask; + struct dma_slave *slave; struct list_head global_node; }; @@ -263,6 +311,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_zero_sum: prepares a zero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation + * @device_prep_slave_sg: prepares a slave dma operation + * @device_terminate_all: terminate all pending operations * @device_issue_pending: push pending transactions to hardware */ struct dma_device { @@ -279,7 +329,8 @@ struct dma_device { int dev_id; struct device *dev; - int (*device_alloc_chan_resources)(struct dma_chan *chan); + int (*device_alloc_chan_resources)(struct dma_chan *chan, + struct dma_client *client); void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( @@ -297,6 +348,12 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( struct dma_chan *chan, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_slave_sg)( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flags); + void (*device_terminate_all)(struct dma_chan *chan); + enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used); @@ -318,16 +375,14 @@ dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan); -static inline void -async_tx_ack(struct dma_async_tx_descriptor *tx) +static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) { tx->flags |= DMA_CTRL_ACK; } -static inline int -async_tx_test_ack(struct dma_async_tx_descriptor *tx) +static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) { - return tx->flags & DMA_CTRL_ACK; + return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; } #define first_dma_cap(mask) __first_dma_cap(&(mask)) diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h new file mode 100644 index 00000000000..04d217b442b --- /dev/null +++ b/include/linux/dw_dmac.h @@ -0,0 +1,62 @@ +/* + * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on + * AVR32 systems.) + * + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef DW_DMAC_H +#define DW_DMAC_H + +#include <linux/dmaengine.h> + +/** + * struct dw_dma_platform_data - Controller configuration parameters + * @nr_channels: Number of channels supported by hardware (max 8) + */ +struct dw_dma_platform_data { + unsigned int nr_channels; +}; + +/** + * struct dw_dma_slave - Controller-specific information about a slave + * @slave: Generic information about the slave + * @ctl_lo: Platform-specific initializer for the CTL_LO register + * @cfg_hi: Platform-specific initializer for the CFG_HI register + * @cfg_lo: Platform-specific initializer for the CFG_LO register + */ +struct dw_dma_slave { + struct dma_slave slave; + u32 cfg_hi; + u32 cfg_lo; +}; + +/* Platform-configurable bits in CFG_HI */ +#define DWC_CFGH_FCMODE (1 << 0) +#define DWC_CFGH_FIFO_MODE (1 << 1) +#define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_SRC_PER(x) ((x) << 7) +#define DWC_CFGH_DST_PER(x) ((x) << 11) + +/* Platform-configurable bits in CFG_LO */ +#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */ +#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ +#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) +#define DWC_CFGL_LOCK_CH_XACT (2 << 12) +#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */ +#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14) +#define DWC_CFGL_LOCK_BUS_XACT (2 << 14) +#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */ +#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */ +#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ +#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ + +static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave) +{ + return container_of(slave, struct dw_dma_slave, slave); +} + +#endif /* DW_DMAC_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 8bb5e87df36..b4b038b89ee 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -27,9 +27,24 @@ struct ethtool_cmd { __u8 autoneg; /* Enable or disable autonegotiation */ __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ - __u32 reserved[4]; + __u16 speed_hi; + __u16 reserved2; + __u32 reserved[3]; }; +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + #define ETHTOOL_BUSINFO_LEN 32 /* these strings are set to whatever the driver author decides... */ struct ethtool_drvinfo { diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index a701399b7fe..a667637b54e 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -10,6 +10,13 @@ #ifdef CONFIG_EVENTFD +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* Flags for eventfd2. */ +#define EFD_CLOEXEC O_CLOEXEC +#define EFD_NONBLOCK O_NONBLOCK + struct file *eventfd_fget(int fd); int eventfd_signal(struct file *file, int n); diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index cf79853967f..f1e1d3c4712 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -14,8 +14,12 @@ #ifndef _LINUX_EVENTPOLL_H #define _LINUX_EVENTPOLL_H +/* For O_CLOEXEC */ +#include <linux/fcntl.h> #include <linux/types.h> +/* Flags for epoll_create1. 
*/ +#define EPOLL_CLOEXEC O_CLOEXEC /* Valid opcodes to issue to sys_epoll_ctl() */ #define EPOLL_CTL_ADD 1 diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 84cec2aa9f1..2efe7b863cf 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -284,8 +284,8 @@ struct ext2_inode { #ifdef __hurd__ #define i_translator osd1.hurd1.h_i_translator -#define i_frag osd2.hurd2.h_i_frag; -#define i_fsize osd2.hurd2.h_i_fsize; +#define i_frag osd2.hurd2.h_i_frag +#define i_fsize osd2.hurd2.h_i_fsize #define i_uid_high osd2.hurd2.h_i_uid_high #define i_gid_high osd2.hurd2.h_i_gid_high #define i_author osd2.hurd2.h_i_author diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 36c54039637..80171ee89a2 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -832,6 +832,7 @@ extern void ext3_discard_reservation (struct inode *); extern void ext3_dirty_inode(struct inode *); extern int ext3_change_inode_journal_flag(struct inode *, int); extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *); +extern int ext3_can_truncate(struct inode *inode); extern void ext3_truncate (struct inode *); extern void ext3_set_inode_flags(struct inode *); extern void ext3_get_inode_flags(struct ext3_inode_info *); diff --git a/include/linux/fb.h b/include/linux/fb.h index 72295b09922..3b8870e32af 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -8,7 +8,6 @@ struct dentry; /* Definitions of frame buffers */ -#define FB_MAJOR 29 #define FB_MAX 32 /* sufficient for now */ /* ioctls @@ -120,6 +119,10 @@ struct dentry; #define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ #define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ #define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ +#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ +#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ +#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ +#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ diff --git a/include/linux/fd1772.h b/include/linux/fd1772.h deleted file mode 100644 index 871d6e4c677..00000000000 --- a/include/linux/fd1772.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef _LINUX_FD1772REG_H -#define _LINUX_FD1772REG_H - -/* -** WD1772 stuff - originally from the M68K Linux - * Modified for Archimedes by Dave Gilbert (gilbertd@cs.man.ac.uk) - */ - -/* register codes */ - -#define FDC1772SELREG_STP (0x80) /* command/status register */ -#define FDC1772SELREG_TRA (0x82) /* track register */ -#define FDC1772SELREG_SEC (0x84) /* sector register */ -#define FDC1772SELREG_DTA (0x86) /* data register */ - -/* register names for FDC1772_READ/WRITE macros */ - -#define FDC1772REG_CMD 0 -#define FDC1772REG_STATUS 0 -#define FDC1772REG_TRACK 2 -#define FDC1772REG_SECTOR 4 -#define FDC1772REG_DATA 6 - -/* command opcodes */ - -#define FDC1772CMD_RESTORE (0x00) /* - */ -#define FDC1772CMD_SEEK (0x10) /* | */ -#define FDC1772CMD_STEP (0x20) /* | TYP 1 Commands */ -#define FDC1772CMD_STIN (0x40) /* | */ -#define FDC1772CMD_STOT (0x60) /* - */ -#define FDC1772CMD_RDSEC (0x80) /* - TYP 2 Commands */ -#define FDC1772CMD_WRSEC (0xa0) /* - " */ -#define FDC1772CMD_RDADR (0xc0) /* - */ -#define FDC1772CMD_RDTRA (0xe0) /* | TYP 3 Commands */ -#define FDC1772CMD_WRTRA (0xf0) /* - */ -#define FDC1772CMD_FORCI (0xd0) /* - TYP 4 Command */ - -/* command modifier bits */ - -#define 
FDC1772CMDADD_SR6 (0x00) /* step rate settings */ -#define FDC1772CMDADD_SR12 (0x01) -#define FDC1772CMDADD_SR2 (0x02) -#define FDC1772CMDADD_SR3 (0x03) -#define FDC1772CMDADD_V (0x04) /* verify */ -#define FDC1772CMDADD_H (0x08) /* wait for spin-up */ -#define FDC1772CMDADD_U (0x10) /* update track register */ -#define FDC1772CMDADD_M (0x10) /* multiple sector access */ -#define FDC1772CMDADD_E (0x04) /* head settling flag */ -#define FDC1772CMDADD_P (0x02) /* precompensation */ -#define FDC1772CMDADD_A0 (0x01) /* DAM flag */ - -/* status register bits */ - -#define FDC1772STAT_MOTORON (0x80) /* motor on */ -#define FDC1772STAT_WPROT (0x40) /* write protected (FDC1772CMD_WR*) */ -#define FDC1772STAT_SPINUP (0x20) /* motor speed stable (Type I) */ -#define FDC1772STAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */ -#define FDC1772STAT_RECNF (0x10) /* record not found */ -#define FDC1772STAT_CRC (0x08) /* CRC error */ -#define FDC1772STAT_TR00 (0x04) /* Track 00 flag (Type I) */ -#define FDC1772STAT_LOST (0x04) /* Lost Data (Type II+III) */ -#define FDC1772STAT_IDX (0x02) /* Index status (Type I) */ -#define FDC1772STAT_DRQ (0x02) /* DRQ status (Type II+III) */ -#define FDC1772STAT_BUSY (0x01) /* FDC1772 is busy */ - - -/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */ -#define DSKSIDE (0x01) - -#define DSKDRVNONE (0x06) -#define DSKDRV0 (0x02) -#define DSKDRV1 (0x04) - -/* step rates */ -#define FDC1772STEP_6 0x00 -#define FDC1772STEP_12 0x01 -#define FDC1772STEP_2 0x02 -#define FDC1772STEP_3 0x03 - -#endif diff --git a/include/linux/file.h b/include/linux/file.h index 27c64bdc68c..a20259e248a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -34,8 +34,9 @@ extern struct file *fget(unsigned int fd); extern struct file *fget_light(unsigned int fd, int *fput_needed); extern void set_close_on_exec(unsigned int fd, int flag); extern void put_filp(struct file *); +extern int alloc_fd(unsigned start, unsigned flags); extern int get_unused_fd(void); -extern int get_unused_fd_flags(int flags); +#define get_unused_fd_flags(flags) alloc_fd(0, (flags)) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index acbdbcc1605..6e199c8dfac 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -24,34 +24,8 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -/** - * Adds a firmware mapping entry. This function uses kmalloc() for memory - * allocation. Use firmware_map_add_early() if you want to use the bootmem - * allocator. - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. - */ int firmware_map_add(resource_size_t start, resource_size_t end, const char *type); - -/** - * Adds a firmware mapping entry. This function uses the bootmem allocator - * for memory allocation. Use firmware_map_add() if you want to use kmalloc(). - * - * That function must be called before late_initcall. - * - * @start: Start of the memory range. - * @end: End of the memory range (inclusive). - * @type: Type of the memory range. - * - * Returns 0 on success, or -ENOMEM if no memory could be allocated. 
- */ int firmware_map_add_early(resource_size_t start, resource_size_t end, const char *type); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9c2ac5c0ef5..580b513668f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -60,6 +60,8 @@ extern int dir_notify_enable; #define MAY_WRITE 2 #define MAY_READ 4 #define MAY_APPEND 8 +#define MAY_ACCESS 16 +#define MAY_OPEN 32 #define FMODE_READ 1 #define FMODE_WRITE 2 @@ -277,7 +279,7 @@ extern int dir_notify_enable; #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/dcache.h> -#include <linux/namei.h> +#include <linux/path.h> #include <linux/stat.h> #include <linux/cache.h> #include <linux/kobject.h> @@ -318,22 +320,23 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * Attribute flags. These should be or-ed together to figure out what * has been changed! */ -#define ATTR_MODE 1 -#define ATTR_UID 2 -#define ATTR_GID 4 -#define ATTR_SIZE 8 -#define ATTR_ATIME 16 -#define ATTR_MTIME 32 -#define ATTR_CTIME 64 -#define ATTR_ATIME_SET 128 -#define ATTR_MTIME_SET 256 -#define ATTR_FORCE 512 /* Not a change, but a change it */ -#define ATTR_ATTR_FLAG 1024 -#define ATTR_KILL_SUID 2048 -#define ATTR_KILL_SGID 4096 -#define ATTR_FILE 8192 -#define ATTR_KILL_PRIV 16384 -#define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */ +#define ATTR_MODE (1 << 0) +#define ATTR_UID (1 << 1) +#define ATTR_GID (1 << 2) +#define ATTR_SIZE (1 << 3) +#define ATTR_ATIME (1 << 4) +#define ATTR_MTIME (1 << 5) +#define ATTR_CTIME (1 << 6) +#define ATTR_ATIME_SET (1 << 7) +#define ATTR_MTIME_SET (1 << 8) +#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_ATTR_FLAG (1 << 10) +#define ATTR_KILL_SUID (1 << 11) +#define ATTR_KILL_SGID (1 << 12) +#define ATTR_FILE (1 << 13) +#define ATTR_KILL_PRIV (1 << 14) +#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ +#define ATTR_TIMES_SET (1 << 16) /* * This is the Inode Attributes structure, used for notify_change(). It @@ -440,6 +443,27 @@ static inline size_t iov_iter_count(struct iov_iter *i) return i->count; } +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. 
+ */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); @@ -481,6 +505,8 @@ struct address_space_operations { int (*migratepage) (struct address_space *, struct page *, struct page *); int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); }; /* @@ -499,7 +525,7 @@ struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ - rwlock_t tree_lock; /* and rwlock protecting it */ + spinlock_t tree_lock; /* and lock protecting it */ unsigned int i_mmap_writable;/* count VM_SHARED mappings */ struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ @@ -792,7 +818,7 @@ struct file { #define f_dentry f_path.dentry #define f_vfsmnt f_path.mnt const struct file_operations *f_op; - atomic_t f_count; + atomic_long_t f_count; unsigned int f_flags; mode_t f_mode; loff_t f_pos; @@ -821,8 +847,8 @@ extern spinlock_t files_lock; #define file_list_lock() spin_lock(&files_lock); #define file_list_unlock() spin_unlock(&files_lock); -#define get_file(x) atomic_inc(&(x)->f_count) -#define file_count(x) atomic_read(&(x)->f_count) +#define get_file(x) atomic_long_inc(&(x)->f_count) +#define file_count(x) atomic_long_read(&(x)->f_count) #ifdef CONFIG_DEBUG_WRITECOUNT static inline void file_take_write(struct file *f) @@ -886,6 +912,12 @@ static inline int file_check_writeable(struct file *filp) #define FL_SLEEP 128 /* A blocking lock */ /* + * Special return value from posix_lock_file() and vfs_lock_file() for + * asynchronous locking. + */ +#define FILE_LOCK_DEFERRED 1 + +/* * The POSIX file lock owner is determined by * the "struct files_struct" in the thread group * (or NULL for no owner - BSD locks). 
@@ -1025,6 +1057,7 @@ extern int send_sigurg(struct fown_struct *fown); extern struct list_head super_blocks; extern spinlock_t sb_lock; +#define sb_entry(list) list_entry((list), struct super_block, s_list) #define S_BIAS (1<<30) struct super_block { struct list_head s_list; /* Keep this first */ @@ -1058,6 +1091,9 @@ struct super_block { struct list_head s_more_io; /* parked for more writeback */ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ struct list_head s_files; + /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ + struct list_head s_dentry_lru; /* unused dentry lru */ + int s_nr_dentry_unused; /* # of dentry on lru */ struct block_device *s_bdev; struct mtd_info *s_mtd; @@ -1126,7 +1162,7 @@ extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); -extern int vfs_symlink(struct inode *, struct dentry *, const char *, int); +extern int vfs_symlink(struct inode *, struct dentry *, const char *); extern int vfs_link(struct dentry *, struct inode *, struct dentry *); extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *); @@ -1185,27 +1221,6 @@ struct block_device_operations { struct module *owner; }; -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user * buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); - /* These macros are for out of kernel modules to test that * the kernel supports the unlocked_ioctl and compat_ioctl * fields in struct file_operations. 
*/ @@ -1262,7 +1277,7 @@ struct inode_operations { void * (*follow_link) (struct dentry *, struct nameidata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); - int (*permission) (struct inode *, int, struct nameidata *); + int (*permission) (struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); @@ -1686,9 +1701,9 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern int is_bad_inode(struct inode *); -extern const struct file_operations read_fifo_fops; -extern const struct file_operations write_fifo_fops; -extern const struct file_operations rdwr_fifo_fops; +extern const struct file_operations read_pipefifo_fops; +extern const struct file_operations write_pipefifo_fops; +extern const struct file_operations rdwr_pipefifo_fops; extern int fs_may_remount_ro(struct super_block *); @@ -1757,7 +1772,7 @@ extern int do_remount_sb(struct super_block *sb, int flags, extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *); -extern int permission(struct inode *, int, struct nameidata *); +extern int inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int, int (*check_acl)(struct inode *, int)); @@ -1773,8 +1788,9 @@ static inline void allow_write_access(struct file *file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } extern int do_pipe(int *); -extern struct file *create_read_pipe(struct file *f); -extern struct file *create_write_pipe(void); +extern int do_pipe_flags(int *, int); +extern struct file *create_read_pipe(struct file *f, int flags); +extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, @@ -1820,7 +1836,7 @@ extern void clear_inode(struct inode *); extern void destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); extern int should_remove_suid(struct dentry *); -extern int remove_suid(struct dentry *); +extern int file_remove_suid(struct file *); extern void __insert_inode_hash(struct inode *, unsigned long hashval); extern void remove_inode_hash(struct inode *); @@ -2006,8 +2022,6 @@ extern void simple_release_fs(struct vfsmount **mount, int *count); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); -extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, - const void *from, size_t available); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 282f5421912..9e5a06e78d0 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -7,7 +7,7 @@ struct fs_struct { atomic_t count; rwlock_t lock; int umask; - struct path root, pwd, altroot; + struct path root, pwd; }; #define INIT_FS { \ @@ -19,7 +19,6 @@ struct fs_struct { extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); -extern void set_fs_altroot(void); extern void set_fs_root(struct fs_struct *, struct path *); extern void set_fs_pwd(struct fs_struct *, struct path *); extern struct fs_struct *copy_fs_struct(struct fs_struct *); diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 
d4828219769..265635dc990 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -104,11 +104,14 @@ struct fuse_file_lock { /** * INIT request/reply flags + * + * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) #define FUSE_FILE_OPS (1 << 2) #define FUSE_ATOMIC_O_TRUNC (1 << 3) +#define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) /** diff --git a/include/linux/genhd.h b/include/linux/genhd.h index e8787417f65..118216f1bd3 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -541,7 +541,7 @@ extern dev_t blk_lookup_devt(const char *name, int part); extern char *disk_name (struct gendisk *hd, int part, char *buf); extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); -extern void add_partition(struct gendisk *, int, sector_t, sector_t, int); +extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); extern void delete_partition(struct gendisk *, int); extern void printk_all_partitions(void); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index b414be38718..e8003afeffb 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -173,11 +173,24 @@ static inline void arch_free_page(struct page *page, int order) { } static inline void arch_alloc_page(struct page *page, int order) { } #endif -extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); +struct page * +__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask); + +static inline struct page * +__alloc_pages(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); +} + +static inline struct page * +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, + struct zonelist *zonelist, nodemask_t *nodemask) +{ + return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); +} -extern struct page * -__alloc_pages_nodemask(gfp_t, unsigned int, - struct zonelist *, nodemask_t *nodemask); static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) @@ -215,6 +228,9 @@ extern struct page *alloc_page_vma(gfp_t gfp_mask, extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); +void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void free_pages_exact(void *virt, size_t size); + #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask),0) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 98be6c5762b..730a20b8357 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -79,6 +79,19 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value) WARN_ON(1); } +static inline int gpio_export(unsigned gpio, bool direction_may_change) +{ + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpio_unexport(unsigned gpio) +{ + /* GPIO can never have been exported */ + WARN_ON(1); +} + static inline int gpio_to_irq(unsigned gpio) { /* GPIO can never have been requested or set as input */ diff --git a/include/linux/harrier_defs.h b/include/linux/harrier_defs.h deleted file mode 100644 index efef11db790..00000000000 --- a/include/linux/harrier_defs.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/linux/harrier_defs.h - * - * Definitions for Motorola MCG Harrier North Bridge & Memory controller - * - * Author: Dale 
Farnsworth - * dale.farnsworth@mvista.com - * - * Extracted from asm-ppc/harrier.h by: - * Randy Vinson - * rvinson@mvista.com - * - * Copyright 2001-2002 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __ASMPPC_HARRIER_DEFS_H -#define __ASMPPC_HARRIER_DEFS_H - -#define HARRIER_DEFAULT_XCSR_BASE 0xfeff0000 - -#define HARRIER_VEND_DEV_ID 0x1057480b - -#define HARRIER_VENI_OFF 0x00 - -#define HARRIER_REVI_OFF 0x05 -#define HARRIER_UCTL_OFF 0xd0 -#define HARRIER_XTAL64_MASK 0x02 - -#define HARRIER_MISC_CSR_OFF 0x1c -#define HARRIER_RSTOUT 0x01000000 -#define HARRIER_SYSCON 0x08000000 -#define HARRIER_EREADY 0x10000000 -#define HARRIER_ERDYS 0x20000000 - -/* Function exception registers */ -#define HARRIER_FEEN_OFF 0x40 /* enable */ -#define HARRIER_FEST_OFF 0x44 /* status */ -#define HARRIER_FEMA_OFF 0x48 /* mask */ -#define HARRIER_FECL_OFF 0x4c /* clear */ - -#define HARRIER_FE_DMA 0x80 -#define HARRIER_FE_MIDB 0x40 -#define HARRIER_FE_MIM0 0x20 -#define HARRIER_FE_MIM1 0x10 -#define HARRIER_FE_MIP 0x08 -#define HARRIER_FE_UA0 0x04 -#define HARRIER_FE_UA1 0x02 -#define HARRIER_FE_ABT 0x01 - -#define HARRIER_SERIAL_0_OFF 0xc0 - -#define HARRIER_MBAR_OFF 0xe0 -#define HARRIER_MBAR_MSK 0xfffc0000 -#define HARRIER_MPIC_CSR_OFF 0xe4 -#define HARRIER_MPIC_OPI_ENABLE 0x40 -#define HARRIER_MPIC_IFEVP_OFF 0x10200 -#define HARRIER_MPIC_IFEVP_VECT_MSK 0xff -#define HARRIER_MPIC_IFEDE_OFF 0x10210 - -/* - * Define the Memory Controller register offsets. - */ -#define HARRIER_SDBA_OFF 0x110 -#define HARRIER_SDBB_OFF 0x114 -#define HARRIER_SDBC_OFF 0x118 -#define HARRIER_SDBD_OFF 0x11c -#define HARRIER_SDBE_OFF 0x120 -#define HARRIER_SDBF_OFF 0x124 -#define HARRIER_SDBG_OFF 0x128 -#define HARRIER_SDBH_OFF 0x12c - -#define HARRIER_SDB_ENABLE 0x00000100 -#define HARRIER_SDB_SIZE_MASK 0xf -#define HARRIER_SDB_SIZE_SHIFT 16 -#define HARRIER_SDB_BASE_MASK 0xff -#define HARRIER_SDB_BASE_SHIFT 24 - -/* - * Define outbound register offsets. 
- */ -#define HARRIER_OTAD0_OFF 0x220 -#define HARRIER_OTOF0_OFF 0x224 -#define HARRIER_OTAD1_OFF 0x228 -#define HARRIER_OTOF1_OFF 0x22c -#define HARRIER_OTAD2_OFF 0x230 -#define HARRIER_OTOF2_OFF 0x234 -#define HARRIER_OTAD3_OFF 0x238 -#define HARRIER_OTOF3_OFF 0x23c - -#define HARRIER_OTADX_START_MSK 0xffff0000UL -#define HARRIER_OTADX_END_MSK 0x0000ffffUL - -#define HARRIER_OTOFX_OFF_MSK 0xffff0000UL -#define HARRIER_OTOFX_ENA 0x80UL -#define HARRIER_OTOFX_WPE 0x10UL -#define HARRIER_OTOFX_SGE 0x08UL -#define HARRIER_OTOFX_RAE 0x04UL -#define HARRIER_OTOFX_MEM 0x02UL -#define HARRIER_OTOFX_IOM 0x01UL - -/* - * Define generic message passing register offsets - */ -/* Mirrored registers (visible from both PowerPC and PCI space) */ -#define HARRIER_XCSR_MP_BASE_OFF 0x290 /* base offset in XCSR space */ -#define HARRIER_PMEP_MP_BASE_OFF 0x100 /* base offset in PMEM space */ -#define HARRIER_MGOM0_OFF 0x00 /* outbound msg 0 */ -#define HARRIER_MGOM1_OFF 0x04 /* outbound msg 1 */ -#define HARRIER_MGOD_OFF 0x08 /* outbound doorbells */ - -#define HARRIER_MGIM0_OFF 0x10 /* inbound msg 0 */ -#define HARRIER_MGIM1_OFF 0x14 /* inbound msg 1 */ -#define HARRIER_MGID_OFF 0x18 /* inbound doorbells */ - -/* PowerPC-only registers */ -#define HARRIER_MGIDM_OFF 0x20 /* inbound doorbell mask */ - -/* PCI-only registers */ -#define HARRIER_PMEP_MGST_OFF 0x20 /* (outbound) interrupt status */ -#define HARRIER_PMEP_MGMS_OFF 0x24 /* (outbound) interrupt mask */ -#define HARRIER_MG_OMI0 (1<<4) -#define HARRIER_MG_OMI1 (1<<5) - -#define HARRIER_PMEP_MGODM_OFF 0x28 /* outbound doorbell mask */ - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_XCSR_TO_PCFS_OFF 0x300 - -/* - * Define message passing attribute register offset - */ -#define HARRIER_MPAT_OFF 0x44 - -/* - * Define inbound attribute register offsets. 
- */ -#define HARRIER_ITSZ0_OFF 0x48 -#define HARRIER_ITAT0_OFF 0x4c - -#define HARRIER_ITSZ1_OFF 0x50 -#define HARRIER_ITAT1_OFF 0x54 - -#define HARRIER_ITSZ2_OFF 0x58 -#define HARRIER_ITAT2_OFF 0x5c - -#define HARRIER_ITSZ3_OFF 0x60 -#define HARRIER_ITAT3_OFF 0x64 - -/* inbound translation size constants */ -#define HARRIER_ITSZ_MSK 0xff -#define HARRIER_ITSZ_4KB 0x00 -#define HARRIER_ITSZ_8KB 0x01 -#define HARRIER_ITSZ_16KB 0x02 -#define HARRIER_ITSZ_32KB 0x03 -#define HARRIER_ITSZ_64KB 0x04 -#define HARRIER_ITSZ_128KB 0x05 -#define HARRIER_ITSZ_256KB 0x06 -#define HARRIER_ITSZ_512KB 0x07 -#define HARRIER_ITSZ_1MB 0x08 -#define HARRIER_ITSZ_2MB 0x09 -#define HARRIER_ITSZ_4MB 0x0A -#define HARRIER_ITSZ_8MB 0x0B -#define HARRIER_ITSZ_16MB 0x0C -#define HARRIER_ITSZ_32MB 0x0D -#define HARRIER_ITSZ_64MB 0x0E -#define HARRIER_ITSZ_128MB 0x0F -#define HARRIER_ITSZ_256MB 0x10 -#define HARRIER_ITSZ_512MB 0x11 -#define HARRIER_ITSZ_1GB 0x12 -#define HARRIER_ITSZ_2GB 0x13 - -/* inbound translation offset */ -#define HARRIER_ITOF_SHIFT 0x10 -#define HARRIER_ITOF_MSK 0xffff - -/* inbound translation atttributes */ -#define HARRIER_ITAT_PRE (1<<3) -#define HARRIER_ITAT_RAE (1<<4) -#define HARRIER_ITAT_WPE (1<<5) -#define HARRIER_ITAT_MEM (1<<6) -#define HARRIER_ITAT_ENA (1<<7) -#define HARRIER_ITAT_GBL (1<<16) - -#define HARRIER_LBA_OFF 0x80 -#define HARRIER_LBA_MSK (1<<31) - -#define HARRIER_XCSR_SIZE 1024 - -/* macros to calculate message passing register offsets */ -#define HARRIER_MP_XCSR(x) ((u32)HARRIER_XCSR_MP_BASE_OFF + (u32)x) - -#define HARRIER_MP_PMEP(x) ((u32)HARRIER_PMEP_MP_BASE_OFF + (u32)x) - -/* - * Define PCI configuration space register offsets - */ -#define HARRIER_MPBAR_OFF PCI_BASE_ADDRESS_0 -#define HARRIER_ITBAR0_OFF PCI_BASE_ADDRESS_1 -#define HARRIER_ITBAR1_OFF PCI_BASE_ADDRESS_2 -#define HARRIER_ITBAR2_OFF PCI_BASE_ADDRESS_3 -#define HARRIER_ITBAR3_OFF PCI_BASE_ADDRESS_4 - -#define HARRIER_XCSR_CONFIG(x) ((u32)HARRIER_XCSR_TO_PCFS_OFF + (u32)x) - -#endif /* __ASMPPC_HARRIER_DEFS_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index fe56b86f2c6..ac4e678a04e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -512,7 +512,7 @@ struct hid_descriptor { /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001)) +#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) /* HID core API */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a79e80b689d..32e0ef0f6e1 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -8,7 +8,6 @@ #include <linux/mempolicy.h> #include <linux/shm.h> #include <asm/tlbflush.h> -#include <asm/hugetlb.h> struct ctl_table; @@ -17,38 +16,45 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) return vma->vm_flags & VM_HUGETLB; } +void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); int follow_hugetlb_page(struct 
mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int); -void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); -void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); +void unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); +void __unmap_hugepage_range(struct vm_area_struct *, + unsigned long, unsigned long, struct page *); int hugetlb_prefault(struct address_space *, struct vm_area_struct *); int hugetlb_report_meminfo(char *); int hugetlb_report_node_meminfo(int, char *); unsigned long hugetlb_total_pages(void); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access); -int hugetlb_reserve_pages(struct inode *inode, long from, long to); +int hugetlb_reserve_pages(struct inode *inode, long from, long to, + struct vm_area_struct *vma); void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); -extern unsigned long max_huge_pages; -extern unsigned long sysctl_overcommit_huge_pages; extern unsigned long hugepages_treat_as_movable; extern const unsigned long hugetlb_zero, hugetlb_infinity; extern int sysctl_hugetlb_shm_group; +extern struct list_head huge_boot_pages; /* arch callbacks */ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr); +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, int write); struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write); +struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, + pud_t *pud, int write); int pmd_huge(pmd_t pmd); +int pud_huge(pud_t pmd); void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); @@ -58,6 +64,11 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) { return 0; } + +static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) +{ +} + static inline unsigned long hugetlb_total_pages(void) { return 0; @@ -67,12 +78,14 @@ static inline unsigned long hugetlb_total_pages(void) #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL) #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) -#define unmap_hugepage_range(vma, start, end) BUG() +#define unmap_hugepage_range(vma, start, end, page) BUG() #define hugetlb_report_meminfo(buf) 0 #define hugetlb_report_node_meminfo(n, buf) 0 #define follow_huge_pmd(mm, addr, pmd, write) NULL -#define prepare_hugepage_range(addr,len) (-EINVAL) +#define follow_huge_pud(mm, addr, pud, write) NULL +#define prepare_hugepage_range(file, addr, len) (-EINVAL) #define pmd_huge(x) 0 +#define pud_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) @@ -93,6 +106,7 @@ struct hugetlbfs_config { umode_t mode; long nr_blocks; long nr_inodes; + struct hstate *hstate; }; struct hugetlbfs_sb_info { @@ -101,6 +115,7 @@ struct hugetlbfs_sb_info { long max_inodes; /* inodes allowed */ long free_inodes; /* inodes free */ spinlock_t stat_lock; + struct hstate *hstate; }; @@ -125,8 
+140,6 @@ struct file *hugetlb_file_setup(const char *name, size_t); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); -#define BLOCKS_PER_HUGEPAGE (HPAGE_SIZE / 512) - static inline int is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) @@ -155,4 +168,115 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long flags); #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ +#ifdef CONFIG_HUGETLB_PAGE + +#define HSTATE_NAME_LEN 32 +/* Defines one hugetlb page size */ +struct hstate { + int hugetlb_next_nid; + unsigned int order; + unsigned long mask; + unsigned long max_huge_pages; + unsigned long nr_huge_pages; + unsigned long free_huge_pages; + unsigned long resv_huge_pages; + unsigned long surplus_huge_pages; + unsigned long nr_overcommit_huge_pages; + struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int nr_huge_pages_node[MAX_NUMNODES]; + unsigned int free_huge_pages_node[MAX_NUMNODES]; + unsigned int surplus_huge_pages_node[MAX_NUMNODES]; + char name[HSTATE_NAME_LEN]; +}; + +struct huge_bootmem_page { + struct list_head list; + struct hstate *hstate; +}; + +/* arch callback */ +int __init alloc_bootmem_huge_page(struct hstate *h); + +void __init hugetlb_add_hstate(unsigned order); +struct hstate *size_to_hstate(unsigned long size); + +#ifndef HUGE_MAX_HSTATE +#define HUGE_MAX_HSTATE 1 +#endif + +extern struct hstate hstates[HUGE_MAX_HSTATE]; +extern unsigned int default_hstate_idx; + +#define default_hstate (hstates[default_hstate_idx]) + +static inline struct hstate *hstate_inode(struct inode *i) +{ + struct hugetlbfs_sb_info *hsb; + hsb = HUGETLBFS_SB(i->i_sb); + return hsb->hstate; +} + +static inline struct hstate *hstate_file(struct file *f) +{ + return hstate_inode(f->f_dentry->d_inode); +} + +static inline struct hstate *hstate_vma(struct vm_area_struct *vma) +{ + return hstate_file(vma->vm_file); +} + +static inline unsigned long huge_page_size(struct hstate *h) +{ + return (unsigned long)PAGE_SIZE << h->order; +} + +static inline unsigned long huge_page_mask(struct hstate *h) +{ + return h->mask; +} + +static inline unsigned int huge_page_order(struct hstate *h) +{ + return h->order; +} + +static inline unsigned huge_page_shift(struct hstate *h) +{ + return h->order + PAGE_SHIFT; +} + +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1 << h->order; +} + +static inline unsigned int blocks_per_huge_page(struct hstate *h) +{ + return huge_page_size(h) / 512; +} + +#include <asm/hugetlb.h> + +static inline struct hstate *page_hstate(struct page *page) +{ + return size_to_hstate(PAGE_SIZE << compound_order(page)); +} + +#else +struct hstate {}; +#define alloc_bootmem_huge_page(h) NULL +#define hstate_file(f) NULL +#define hstate_vma(v) NULL +#define hstate_inode(i) NULL +#define huge_page_size(h) PAGE_SIZE +#define huge_page_mask(h) PAGE_MASK +#define huge_page_order(h) 0 +#define huge_page_shift(h) PAGE_SHIFT +static inline unsigned int pages_per_huge_page(struct hstate *h) +{ + return 1; +} +#endif + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 4862398e05b..bf34c5f4c05 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -39,7 +39,6 @@ #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ #define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_MGATVO 23 
/* Matrox TVOut */ #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ #define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_SAB3036 26 /* SAB3036 tuner */ @@ -95,7 +94,6 @@ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ #define I2C_HW_B_VIA 0x010007 /* Via vt82c586b */ #define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_G400 0x010009 /* Matrox G400 */ #define I2C_HW_B_I810 0x01000a /* Intel I810 */ #define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ #define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h index e6e9c814da6..f13255e0640 100644 --- a/include/linux/i2c-pnx.h +++ b/include/linux/i2c-pnx.h @@ -12,7 +12,9 @@ #ifndef __I2C_PNX_H__ #define __I2C_PNX_H__ -#include <asm/arch/i2c.h> +#include <linux/pm.h> + +struct platform_device; struct i2c_pnx_mif { int ret; /* Return value */ diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h new file mode 100644 index 00000000000..e10336631c6 --- /dev/null +++ b/include/linux/i2c/max732x.h @@ -0,0 +1,19 @@ +#ifndef __LINUX_I2C_MAX732X_H +#define __LINUX_I2C_MAX732X_H + +/* platform data for the MAX732x 8/16-bit I/O expander driver */ + +struct max732x_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + void *context; /* param to setup/teardown */ + + int (*setup)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); + int (*teardown)(struct i2c_client *client, + unsigned gpio, unsigned ngpio, + void *context); +}; +#endif /* __LINUX_I2C_MAX732X_H */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 7d51cbca49a..75ae6d8aba4 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, } dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); - if (!dma_mapping_error(dma_addr)) { + if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 if ((sizeof(dma_addr_t) > 4) && c->pae_support) { *mptr++ = cpu_to_le32(0x7C020002); diff --git a/include/linux/ide.h b/include/linux/ide.h index 4726126f5a5..87c12ed9695 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -178,6 +178,7 @@ typedef struct hw_regs_s { ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ hwif_chipset_t chipset; struct device *dev, *parent; + unsigned long config; } hw_regs_t; void ide_init_port_data(struct hwif_s *, unsigned int); @@ -210,13 +211,16 @@ static inline int __ide_default_irq(unsigned long base) return 0; } +#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \ + defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \ + || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64) #include <asm/ide.h> - -#if !defined(MAX_HWIFS) || defined(CONFIG_EMBEDDED) -#undef MAX_HWIFS -#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS +#else +#include <asm-generic/ide_iops.h> #endif +#define MAX_HWIFS 10 + /* Currently only m68k, apus and m8xx need it */ #ifndef IDE_ARCH_ACK_INTR # define ide_ack_intr(hwif) (1) @@ -307,7 +311,65 @@ struct ide_acpi_drive_link; struct ide_acpi_hwif_link; #endif -typedef struct ide_drive_s { +/* ATAPI device flags */ +enum { + IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), + IDE_AFLAG_MEDIA_CHANGED = (1 << 1), + + /* ide-cd */ + /* Drive cannot lock the door. */ + IDE_AFLAG_NO_DOORLOCK = (1 << 2), + /* Drive cannot eject the disc. 
*/ + IDE_AFLAG_NO_EJECT = (1 << 3), + /* Drive is a pre ATAPI 1.2 drive. */ + IDE_AFLAG_PRE_ATAPI12 = (1 << 4), + /* TOC addresses are in BCD. */ + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), + /* TOC track numbers are in BCD. */ + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), + /* + * Drive does not provide data in multiples of SECTOR_SIZE + * when more than one interrupt is needed. + */ + IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), + /* Seeking in progress. */ + IDE_AFLAG_SEEKING = (1 << 8), + /* Saved TOC information is current. */ + IDE_AFLAG_TOC_VALID = (1 << 9), + /* We think that the drive door is locked. */ + IDE_AFLAG_DOOR_LOCKED = (1 << 10), + /* SET_CD_SPEED command is unsupported. */ + IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), + IDE_AFLAG_VERTOS_300_SSD = (1 << 12), + IDE_AFLAG_VERTOS_600_ESD = (1 << 13), + IDE_AFLAG_SANYO_3CD = (1 << 14), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), + + /* ide-floppy */ + /* Format in progress */ + IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18), + /* Avoid commands not supported in Clik drive */ + IDE_AFLAG_CLIK_DRIVE = (1 << 19), + /* Requires BH algorithm for packets */ + IDE_AFLAG_ZIP_DRIVE = (1 << 20), + + /* ide-tape */ + IDE_AFLAG_IGNORE_DSC = (1 << 21), + /* 0 When the tape position is unknown */ + IDE_AFLAG_ADDRESS_VALID = (1 << 22), + /* Device already opened */ + IDE_AFLAG_BUSY = (1 << 23), + /* Attempt to auto-detect the current user block size */ + IDE_AFLAG_DETECT_BS = (1 << 24), + /* Currently on a filemark */ + IDE_AFLAG_FILEMARK = (1 << 25), + /* 0 = no tape is loaded, so we don't rewind after ejecting */ + IDE_AFLAG_MEDIUM_PRESENT = (1 << 26) +}; + +struct ide_drive_s { char name[4]; /* drive name, such as "hda" */ char driver_req[10]; /* requests specific driver */ @@ -355,7 +417,6 @@ typedef struct ide_drive_s { unsigned nodma : 1; /* disallow DMA */ unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ - unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */ unsigned sleeping : 1; /* 1=sleeping & sleep field valid */ unsigned post_reset : 1; @@ -400,7 +461,14 @@ typedef struct ide_drive_s { struct list_head list; struct device gendev; struct completion gendev_rel_comp; /* to deal with device release() */ -} ide_drive_t; + + /* callback for packet commands */ + void (*pc_callback)(struct ide_drive_s *); + + unsigned long atapi_flags; +}; + +typedef struct ide_drive_s ide_drive_t; #define to_ide_device(dev)container_of(dev, ide_drive_t, gendev) @@ -408,26 +476,55 @@ typedef struct ide_drive_s { ((1<<ide_pci)|(1<<ide_cmd646)|(1<<ide_ali14xx)) #define IDE_CHIPSET_IS_PCI(c) ((IDE_CHIPSET_PCI_MASK >> (c)) & 1) +struct ide_task_s; struct ide_port_info; +struct ide_tp_ops { + void (*exec_command)(struct hwif_s *, u8); + u8 (*read_status)(struct hwif_s *); + u8 (*read_altstatus)(struct hwif_s *); + u8 (*read_sff_dma_status)(struct hwif_s *); + + void (*set_irq)(struct hwif_s *, int); + + void (*tf_load)(ide_drive_t *, struct ide_task_s *); + void (*tf_read)(ide_drive_t *, struct ide_task_s *); + + void (*input_data)(ide_drive_t *, struct request *, void *, + unsigned int); + void (*output_data)(ide_drive_t *, struct request *, void *, + unsigned int); +}; + +extern const struct ide_tp_ops default_tp_ops; + +/** + * struct ide_port_ops - IDE port operations + * + * @init_dev: host specific initialization of a device + * 
@set_pio_mode: routine to program host for PIO mode + * @set_dma_mode: routine to program host for DMA mode + * @selectproc: tweaks hardware to select drive + * @reset_poll: chipset polling based on hba specifics + * @pre_reset: chipset specific changes to default for device-hba resets + * @resetproc: routine to reset controller after a disk reset + * @maskproc: special host masking for drive selection + * @quirkproc: check host's drive quirk list + * + * @mdma_filter: filter MDMA modes + * @udma_filter: filter UDMA modes + * + * @cable_detect: detect cable type + */ struct ide_port_ops { - /* host specific initialization of a device */ void (*init_dev)(ide_drive_t *); - /* routine to program host for PIO mode */ void (*set_pio_mode)(ide_drive_t *, const u8); - /* routine to program host for DMA mode */ void (*set_dma_mode)(ide_drive_t *, const u8); - /* tweaks hardware to select drive */ void (*selectproc)(ide_drive_t *); - /* chipset polling based on hba specifics */ int (*reset_poll)(ide_drive_t *); - /* chipset specific changes to default for device-hba resets */ void (*pre_reset)(ide_drive_t *); - /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); - /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); - /* check host's drive quirk list */ void (*quirkproc)(ide_drive_t *); u8 (*mdma_filter)(ide_drive_t *); @@ -447,7 +544,7 @@ struct ide_dma_ops { void (*dma_timeout)(struct ide_drive_s *); }; -struct ide_task_s; +struct ide_host; typedef struct hwif_s { struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ @@ -455,6 +552,8 @@ typedef struct hwif_s { struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ + struct ide_host *host; + char name[6]; /* name of interface, eg. 
"ide0" */ struct ide_io_ports io_ports; @@ -486,22 +585,12 @@ typedef struct hwif_s { void (*rw_disk)(ide_drive_t *, struct request *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; - void (*tf_load)(ide_drive_t *, struct ide_task_s *); - void (*tf_read)(ide_drive_t *, struct ide_task_s *); - - void (*input_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*output_data)(ide_drive_t *, struct request *, void *, unsigned); - void (*ide_dma_clear_irq)(ide_drive_t *drive); - void (*OUTB)(u8 addr, unsigned long port); - void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port); - - u8 (*INB)(unsigned long port); - /* dma physical region descriptor table (cpu view) */ unsigned int *dmatable_cpu; /* dma physical region descriptor table (dma view) */ @@ -524,8 +613,6 @@ typedef struct hwif_s { int irq; /* our irq number */ unsigned long dma_base; /* base addr for dma ports */ - unsigned long dma_command; /* dma command register */ - unsigned long dma_status; /* dma status register */ unsigned long config_data; /* for use by chipset-specific code */ unsigned long select_data; /* for use by chipset-specific code */ @@ -552,6 +639,14 @@ typedef struct hwif_s { #endif } ____cacheline_internodealigned_in_smp ide_hwif_t; +struct ide_host { + ide_hwif_t *ports[MAX_HWIFS]; + unsigned int n_ports; + struct device *dev[2]; + unsigned long host_flags; + void *host_priv; +}; + /* * internal ide interrupt handler type */ @@ -611,8 +706,6 @@ enum { PC_FLAG_WRITING = (1 << 6), /* command timed out */ PC_FLAG_TIMEDOUT = (1 << 7), - PC_FLAG_ZIP_DRIVE = (1 << 8), - PC_FLAG_DRQ_INTERRUPT = (1 << 9), }; struct ide_atapi_pc { @@ -646,8 +739,6 @@ struct ide_atapi_pc { */ u8 pc_buf[256]; - void (*callback)(ide_drive_t *); - /* idetape only */ struct idetape_bh *bh; char *b_data; @@ -802,18 +893,14 @@ struct ide_driver_s { #define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver) +int ide_device_get(ide_drive_t *); +void ide_device_put(ide_drive_t *); + int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long); extern int ide_vlb_clk; extern int ide_pci_clk; -ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); - -static inline ide_hwif_t *ide_find_port(void) -{ - return ide_find_port_slot(NULL); -} - extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, int uptodate, int nr_sectors); @@ -884,6 +971,7 @@ enum { IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | IDE_TFLAG_IN_HOB_NSECT | IDE_TFLAG_IN_HOB_LBA, + IDE_TFLAG_IN_FEATURE = (1 << 1), IDE_TFLAG_IN_NSECT = (1 << 25), IDE_TFLAG_IN_LBAL = (1 << 26), IDE_TFLAG_IN_LBAM = (1 << 27), @@ -948,9 +1036,25 @@ typedef struct ide_task_s { void ide_tf_dump(const char *, struct ide_taskfile *); +void ide_exec_command(ide_hwif_t *, u8); +u8 ide_read_status(ide_hwif_t *); +u8 ide_read_altstatus(ide_hwif_t *); +u8 ide_read_sff_dma_status(ide_hwif_t *); + +void ide_set_irq(ide_hwif_t *, int); + +void ide_tf_load(ide_drive_t *, ide_task_t *); +void ide_tf_read(ide_drive_t *, ide_task_t *); + +void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); + extern void SELECT_DRIVE(ide_drive_t *); void SELECT_MASK(ide_drive_t *, int); +u8 ide_read_error(ide_drive_t *); +void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); + extern int 
drive_is_ready(ide_drive_t *); void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); @@ -1000,12 +1104,15 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o #define ide_pci_register_driver(d) pci_register_driver(d) #endif -void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *); +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, + hw_regs_t *, hw_regs_t **); void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI int ide_pci_set_master(struct pci_dev *, const char *); unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *); +extern const struct ide_dma_ops sff_dma_ops; +int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *); int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *); #else static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, @@ -1015,10 +1122,6 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, } #endif -extern void default_hwif_iops(ide_hwif_t *); -extern void default_hwif_mmiops(ide_hwif_t *); -extern void default_hwif_transport(ide_hwif_t *); - typedef struct ide_pci_enablebit_s { u8 reg; /* byte pci reg holding the enable-bit */ u8 mask; /* mask to isolate the enable-bit */ @@ -1081,7 +1184,6 @@ enum { IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), /* serialize ports if DMA is possible (for sl82c105) */ IDE_HFLAG_SERIALIZE_DMA = (1 << 27), /* force host out of "simplex" mode */ @@ -1092,8 +1194,6 @@ enum { IDE_HFLAG_NO_IO_32BIT = (1 << 30), /* never unmask IRQs */ IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31), - /* host uses VDMA (disabled for now) */ - IDE_HFLAG_VDMA = 0, }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1104,12 +1204,13 @@ enum { struct ide_port_info { char *name; - unsigned int (*init_chipset)(struct pci_dev *, const char *); + unsigned int (*init_chipset)(struct pci_dev *); void (*init_iops)(ide_hwif_t *); void (*init_hwif)(ide_hwif_t *); int (*init_dma)(ide_hwif_t *, const struct ide_port_info *); + const struct ide_tp_ops *tp_ops; const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; @@ -1122,8 +1223,10 @@ struct ide_port_info { u8 udma_mask; }; -int ide_setup_pci_device(struct pci_dev *, const struct ide_port_info *); -int ide_setup_pci_devices(struct pci_dev *, struct pci_dev *, const struct ide_port_info *); +int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *); +int ide_pci_init_two(struct pci_dev *, struct pci_dev *, + const struct ide_port_info *, void *); +void ide_pci_remove(struct pci_dev *); void ide_map_sg(ide_drive_t *, struct request *); void ide_init_sg_cmd(ide_drive_t *, struct request *); @@ -1163,7 +1266,6 @@ void ide_destroy_dmatable(ide_drive_t *); extern int ide_build_dmatable(ide_drive_t *, struct request *); int ide_allocate_dma_engine(ide_hwif_t *); void ide_release_dma_engine(ide_hwif_t *); -void ide_setup_dma(ide_hwif_t *, unsigned long); void ide_dma_host_set(ide_drive_t *, int); extern int ide_dma_setup(ide_drive_t *); @@ -1217,8 +1319,14 @@ void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); -int ide_device_add_all(u8 *idx, const struct ide_port_info *); -int ide_device_add(u8 idx[4], const struct ide_port_info *); +struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **); +struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); +void ide_host_free(struct 
ide_host *); +int ide_host_register(struct ide_host *, const struct ide_port_info *, + hw_regs_t **); +int ide_host_add(const struct ide_port_info *, hw_regs_t **, + struct ide_host **); +void ide_host_remove(struct ide_host *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); void ide_port_unregister_devices(ide_hwif_t *); void ide_port_scan(ide_hwif_t *); @@ -1350,33 +1458,4 @@ static inline ide_drive_t *ide_get_paired_drive(ide_drive_t *drive) return &hwif->drives[(drive->dn ^ 1) & 1]; } - -static inline void ide_set_irq(ide_drive_t *drive, int on) -{ - ide_hwif_t *hwif = drive->hwif; - - hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2), - hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_status(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.status_addr); -} - -static inline u8 ide_read_altstatus(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.ctl_addr); -} - -static inline u8 ide_read_error(ide_drive_t *drive) -{ - ide_hwif_t *hwif = drive->hwif; - - return hwif->INB(hwif->io_ports.error_addr); -} #endif /* _IDE_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 9a2d762124d..fa035f96f2a 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/init.h> +#include <linux/rcupdate.h> #if BITS_PER_LONG == 32 # define IDR_BITS 5 @@ -51,6 +52,7 @@ struct idr_layer { unsigned long bitmap; /* A zero bit means "space here" */ struct idr_layer *ary[1<<IDR_BITS]; int count; /* When zero, we can release it */ + struct rcu_head rcu_head; }; struct idr { @@ -71,6 +73,28 @@ struct idr { } #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +/* Actions to be taken after a call to _idr_sub_alloc */ +#define IDR_NEED_TO_GROW -2 +#define IDR_NOMORE_SPACE -3 + +#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC) + +/** + * idr synchronization (stolen from radix-tree.h) + * + * idr_find() is able to be called locklessly, using RCU. The caller must + * ensure calls to this function are made within rcu_read_lock() regions. + * Other readers (lock-free or otherwise) and modifications may be running + * concurrently. + * + * It is still required that the caller manage the synchronization and + * lifetimes of the items. So if RCU lock-free lookups are used, typically + * this would mean that the items have their own locks, or are amenable to + * lock-free access; and that the items are freed by RCU (or only freed after + * having been deleted from the idr tree *and* a synchronize_rcu() grace + * period). + */ + /* * This is what we export. 
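The idr.h comment above spells out the new RCU rules: idr_find() may run locklessly as long as the caller is inside rcu_read_lock() and manages object lifetimes itself. A minimal sketch of a lookup that follows those rules (the idr instance, object type and use_obj() helper are hypothetical, not from this patch):

#include <linux/idr.h>
#include <linux/rcupdate.h>

static DEFINE_IDR(my_idr);                     /* hypothetical idr instance */

static void lookup_and_use(int id)
{
        struct my_obj *obj;                    /* hypothetical object type */

        rcu_read_lock();
        obj = idr_find(&my_idr, id);           /* lock-free lookup under RCU */
        if (obj)
                use_obj(obj);                  /* hypothetical; must not sleep here */
        rcu_read_unlock();
}

Any object returned this way must itself be freed by RCU (or only after removal plus synchronize_rcu()), exactly as the comment requires.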
*/ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a1630ba0b87..7f4df7c7659 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -506,6 +506,19 @@ struct ieee80211_channel_sw_ie { u8 count; } __attribute__ ((packed)); +/** + * struct ieee80211_tim + * + * This structure refers to "Traffic Indication Map information element" + */ +struct ieee80211_tim_ie { + u8 dtim_count; + u8 dtim_period; + u8 bitmap_ctrl; + /* variable size: 1 - 251 bytes */ + u8 virtual_map[0]; +} __attribute__ ((packed)); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 2baace2788a..31d8629e75a 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; uint8_t data[0]; -} __attribute__((aligned(4))); +} __attribute__((packed)); /* Find the next record, taking into account the 4-byte alignment */ static inline const struct ihex_binrec * diff --git a/include/linux/init.h b/include/linux/init.h index 21d658cdfa2..93538b696e3 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -139,6 +139,7 @@ extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; /* Defined in init/main.c */ +extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; @@ -170,6 +171,13 @@ extern void (*late_time_init)(void); __attribute__((__section__(".initcall" level ".init"))) = fn /* + * Early initcalls run before initializing SMP. + * + * Only for built-in code, not modules. + */ +#define early_initcall(fn) __define_initcall("early",fn,early) + +/* * A "pure" initcall has no dependencies on anything else, and purely * initializes variables that couldn't be statically initialized. * @@ -275,13 +283,7 @@ void __init parse_early_param(void); #define security_initcall(fn) module_init(fn) -/* These macros create a dummy inline: gcc 2.9x does not count alias - as usage, hence the `unused function' warning when __init functions - are declared static. We use the dummy __*_module_inline functions - both to kill the warning and check the type of the init/cleanup - function. */ - -/* Each module must use one module_init(), or one no_module_init */ +/* Each module must use one module_init(). */ #define module_init(initfn) \ static inline initcall_t __inittest(void) \ { return initfn; } \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 93c45acf249..021d8e720c7 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -122,7 +122,7 @@ extern struct group_info init_groups; .state = 0, \ .stack = &init_thread_info, \ .usage = ATOMIC_INIT(2), \ - .flags = 0, \ + .flags = PF_KTHREAD, \ .lock_depth = -1, \ .prio = MAX_PRIO-20, \ .static_prio = MAX_PRIO-20, \ diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 742b917e7d1..bd578578a8b 100644 --- a/include/linux/inotify.h +++ b/include/linux/inotify.h @@ -7,6 +7,8 @@ #ifndef _LINUX_INOTIFY_H #define _LINUX_INOTIFY_H +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> #include <linux/types.h> /* @@ -63,6 +65,10 @@ struct inotify_event { IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF | \ IN_MOVE_SELF) +/* Flags for sys_inotify_init1. 
*/ +#define IN_CLOEXEC O_CLOEXEC +#define IN_NONBLOCK O_NONBLOCK + #ifdef __KERNEL__ #include <linux/dcache.h> diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 62aa4f895ab..58ff4e74b2f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -223,35 +223,6 @@ static inline int disable_irq_wake(unsigned int irq) #define or_softirq_pending(x) (local_softirq_pending() |= (x)) #endif -/* - * Temporary defines for UP kernels, until all code gets fixed. - */ -#ifndef CONFIG_SMP -static inline void __deprecated cli(void) -{ - local_irq_disable(); -} -static inline void __deprecated sti(void) -{ - local_irq_enable(); -} -static inline void __deprecated save_flags(unsigned long *x) -{ - local_save_flags(*x); -} -#define save_flags(x) save_flags(&x) -static inline void __deprecated restore_flags(unsigned long x) -{ - local_irq_restore(x); -} - -static inline void __deprecated save_and_cli(unsigned long *x) -{ - local_irq_save(*x); -} -#define save_and_cli(x) save_and_cli(&x) -#endif /* CONFIG_SMP */ - /* Some architectures might implement lazy enabling/disabling of * interrupts. In some cases, such as stop_machine, we might want * to ensure that after a local_irq_disable(), interrupts have diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 2cd07cc2968..22d2115458c 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -118,6 +118,10 @@ extern int allocate_resource(struct resource *root, struct resource *new, int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(struct resource *res) +{ + return res->end - res->start + 1; +} /* Convenience shorthand with allocation */ #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h new file mode 100644 index 00000000000..ec6eb49af2d --- /dev/null +++ b/include/linux/ip_vs.h @@ -0,0 +1,245 @@ +/* + * IP Virtual Server + * data structure and functionality definitions + */ + +#ifndef _IP_VS_H +#define _IP_VS_H + +#include <linux/types.h> /* For __beXX types in userland */ + +#define IP_VS_VERSION_CODE 0x010201 +#define NVERSION(version) \ + (version >> 16) & 0xFF, \ + (version >> 8) & 0xFF, \ + version & 0xFF + +/* + * Virtual Service Flags + */ +#define IP_VS_SVC_F_PERSISTENT 0x0001 /* persistent port */ +#define IP_VS_SVC_F_HASHED 0x0002 /* hashed entry */ + +/* + * Destination Server Flags + */ +#define IP_VS_DEST_F_AVAILABLE 0x0001 /* server is available */ +#define IP_VS_DEST_F_OVERLOAD 0x0002 /* server is overloaded */ + +/* + * IPVS sync daemon states + */ +#define IP_VS_STATE_NONE 0x0000 /* daemon is stopped */ +#define IP_VS_STATE_MASTER 0x0001 /* started as master */ +#define IP_VS_STATE_BACKUP 0x0002 /* started as backup */ + +/* + * IPVS socket options + */ +#define IP_VS_BASE_CTL (64+1024+64) /* base */ + +#define IP_VS_SO_SET_NONE IP_VS_BASE_CTL /* just peek */ +#define IP_VS_SO_SET_INSERT (IP_VS_BASE_CTL+1) +#define IP_VS_SO_SET_ADD (IP_VS_BASE_CTL+2) +#define IP_VS_SO_SET_EDIT (IP_VS_BASE_CTL+3) +#define IP_VS_SO_SET_DEL (IP_VS_BASE_CTL+4) +#define IP_VS_SO_SET_FLUSH (IP_VS_BASE_CTL+5) +#define IP_VS_SO_SET_LIST (IP_VS_BASE_CTL+6) +#define IP_VS_SO_SET_ADDDEST (IP_VS_BASE_CTL+7) +#define IP_VS_SO_SET_DELDEST (IP_VS_BASE_CTL+8) +#define IP_VS_SO_SET_EDITDEST (IP_VS_BASE_CTL+9) +#define IP_VS_SO_SET_TIMEOUT (IP_VS_BASE_CTL+10) +#define 
IP_VS_SO_SET_STARTDAEMON (IP_VS_BASE_CTL+11) +#define IP_VS_SO_SET_STOPDAEMON (IP_VS_BASE_CTL+12) +#define IP_VS_SO_SET_RESTORE (IP_VS_BASE_CTL+13) +#define IP_VS_SO_SET_SAVE (IP_VS_BASE_CTL+14) +#define IP_VS_SO_SET_ZERO (IP_VS_BASE_CTL+15) +#define IP_VS_SO_SET_MAX IP_VS_SO_SET_ZERO + +#define IP_VS_SO_GET_VERSION IP_VS_BASE_CTL +#define IP_VS_SO_GET_INFO (IP_VS_BASE_CTL+1) +#define IP_VS_SO_GET_SERVICES (IP_VS_BASE_CTL+2) +#define IP_VS_SO_GET_SERVICE (IP_VS_BASE_CTL+3) +#define IP_VS_SO_GET_DESTS (IP_VS_BASE_CTL+4) +#define IP_VS_SO_GET_DEST (IP_VS_BASE_CTL+5) /* not used now */ +#define IP_VS_SO_GET_TIMEOUT (IP_VS_BASE_CTL+6) +#define IP_VS_SO_GET_DAEMON (IP_VS_BASE_CTL+7) +#define IP_VS_SO_GET_MAX IP_VS_SO_GET_DAEMON + + +/* + * IPVS Connection Flags + */ +#define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ +#define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ +#define IP_VS_CONN_F_LOCALNODE 0x0001 /* local node */ +#define IP_VS_CONN_F_TUNNEL 0x0002 /* tunneling */ +#define IP_VS_CONN_F_DROUTE 0x0003 /* direct routing */ +#define IP_VS_CONN_F_BYPASS 0x0004 /* cache bypass */ +#define IP_VS_CONN_F_SYNC 0x0020 /* entry created by sync */ +#define IP_VS_CONN_F_HASHED 0x0040 /* hashed entry */ +#define IP_VS_CONN_F_NOOUTPUT 0x0080 /* no output packets */ +#define IP_VS_CONN_F_INACTIVE 0x0100 /* not established */ +#define IP_VS_CONN_F_OUT_SEQ 0x0200 /* must do output seq adjust */ +#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */ +#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */ +#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */ +#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ + +#define IP_VS_SCHEDNAME_MAXLEN 16 +#define IP_VS_IFNAME_MAXLEN 16 + + +/* + * The struct ip_vs_service_user and struct ip_vs_dest_user are + * used to set IPVS rules through setsockopt. 
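As this comment says, the structures defined just below are handed to the kernel with setsockopt(). A user-space sketch that adds a TCP virtual service, illustrative only and not part of this patch; it assumes a raw IP socket and CAP_NET_ADMIN, and the addresses are made up:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/ip_vs.h>

static int add_service_example(void)
{
        struct ip_vs_service_user svc;
        int fd, ret;

        fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);    /* needs CAP_NET_ADMIN */
        if (fd < 0)
                return -1;

        memset(&svc, 0, sizeof(svc));
        svc.protocol = IPPROTO_TCP;
        svc.addr     = inet_addr("192.0.2.10");         /* virtual IP (example) */
        svc.port     = htons(80);
        strncpy(svc.sched_name, "rr", sizeof(svc.sched_name));  /* round-robin */

        ret = setsockopt(fd, IPPROTO_IP, IP_VS_SO_SET_ADD, &svc, sizeof(svc));
        close(fd);
        return ret;
}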
+ */ +struct ip_vs_service_user { + /* virtual service addresses */ + u_int16_t protocol; + __be32 addr; /* virtual ip address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* virtual service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout in sec */ + __be32 netmask; /* persistent netmask */ +}; + + +struct ip_vs_dest_user { + /* destination server address */ + __be32 addr; + __be16 port; + + /* real server options */ + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + /* thresholds for active connections */ + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ +}; + + +/* + * IPVS statistics object (for user space) + */ +struct ip_vs_stats_user +{ + __u32 conns; /* connections scheduled */ + __u32 inpkts; /* incoming packets */ + __u32 outpkts; /* outgoing packets */ + __u64 inbytes; /* incoming bytes */ + __u64 outbytes; /* outgoing bytes */ + + __u32 cps; /* current connection rate */ + __u32 inpps; /* current in packet rate */ + __u32 outpps; /* current out packet rate */ + __u32 inbps; /* current in byte rate */ + __u32 outbps; /* current out byte rate */ +}; + + +/* The argument to IP_VS_SO_GET_INFO */ +struct ip_vs_getinfo { + /* version number */ + unsigned int version; + + /* size of connection hash table */ + unsigned int size; + + /* number of virtual services */ + unsigned int num_services; +}; + + +/* The argument to IP_VS_SO_GET_SERVICE */ +struct ip_vs_service_entry { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* service options */ + char sched_name[IP_VS_SCHEDNAME_MAXLEN]; + unsigned flags; /* virtual service flags */ + unsigned timeout; /* persistent timeout */ + __be32 netmask; /* persistent netmask */ + + /* number of real servers */ + unsigned int num_dests; + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +struct ip_vs_dest_entry { + __be32 addr; /* destination address */ + __be16 port; + unsigned conn_flags; /* connection flags */ + int weight; /* destination weight */ + + u_int32_t u_threshold; /* upper threshold */ + u_int32_t l_threshold; /* lower threshold */ + + u_int32_t activeconns; /* active connections */ + u_int32_t inactconns; /* inactive connections */ + u_int32_t persistconns; /* persistent connections */ + + /* statistics */ + struct ip_vs_stats_user stats; +}; + + +/* The argument to IP_VS_SO_GET_DESTS */ +struct ip_vs_get_dests { + /* which service: user fills in these */ + u_int16_t protocol; + __be32 addr; /* virtual address */ + __be16 port; + u_int32_t fwmark; /* firwall mark of service */ + + /* number of real servers */ + unsigned int num_dests; + + /* the real servers */ + struct ip_vs_dest_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_SERVICES */ +struct ip_vs_get_services { + /* number of virtual services */ + unsigned int num_services; + + /* service table */ + struct ip_vs_service_entry entrytable[0]; +}; + + +/* The argument to IP_VS_SO_GET_TIMEOUT */ +struct ip_vs_timeout_user { + int tcp_timeout; + int tcp_fin_timeout; + int udp_timeout; +}; + + +/* The argument to IP_VS_SO_GET_DAEMON */ +struct ip_vs_daemon_user { + /* sync daemon state (master/backup) */ + int state; + + /* multicast interface name */ + char mcast_ifn[IP_VS_IFNAME_MAXLEN]; + + /* SyncID we belong to */ + int syncid; +}; + 
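The IP_VS_SO_GET_* commands travel the other way, via getsockopt(); for example, querying the global state with struct ip_vs_getinfo (again a user-space sketch, reusing a raw socket like the one in the earlier example):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ip_vs.h>

static int get_info_example(int fd)
{
        struct ip_vs_getinfo info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_IP, IP_VS_SO_GET_INFO, &info, &len) < 0)
                return -1;
        /* the kernel fills in info.version, info.size (connection hash table
         * size) and info.num_services */
        return (int)info.num_services;
}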
+#endif /* _IP_VS_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index ea6c18a8b0d..ea330f9e710 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -36,6 +36,7 @@ struct ipc_namespace { int msg_ctlmni; atomic_t msg_bytes; atomic_t msg_hdrs; + int auto_msgmni; size_t shm_ctlmax; size_t shm_ctlall; @@ -53,7 +54,7 @@ extern atomic_t nr_ipc_ns; extern int register_ipcns_notifier(struct ipc_namespace *); extern int cond_register_ipcns_notifier(struct ipc_namespace *); -extern int unregister_ipcns_notifier(struct ipc_namespace *); +extern void unregister_ipcns_notifier(struct ipc_namespace *); extern int ipcns_notify(unsigned long); #else /* CONFIG_SYSVIPC */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 2b1c2e58566..74bde13224c 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -11,6 +11,8 @@ #ifndef _LINUX_TRACE_IRQFLAGS_H #define _LINUX_TRACE_IRQFLAGS_H +#include <linux/typecheck.h> + #ifdef CONFIG_TRACE_IRQFLAGS extern void trace_softirqs_on(unsigned long ip); extern void trace_softirqs_off(unsigned long ip); @@ -58,18 +60,24 @@ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) #define local_irq_disable() \ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) -#define local_irq_save(flags) \ - do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0) +#define local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_local_irq_save(flags); \ + trace_hardirqs_off(); \ + } while (0) -#define local_irq_restore(flags) \ - do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ - trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ - } \ + +#define local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + if (raw_irqs_disabled_flags(flags)) { \ + raw_local_irq_restore(flags); \ + trace_hardirqs_off(); \ + } else { \ + trace_hardirqs_on(); \ + raw_local_irq_restore(flags); \ + } \ } while (0) #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ /* @@ -78,8 +86,16 @@ */ # define raw_local_irq_disable() local_irq_disable() # define raw_local_irq_enable() local_irq_enable() -# define raw_local_irq_save(flags) local_irq_save(flags) -# define raw_local_irq_restore(flags) local_irq_restore(flags) +# define raw_local_irq_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + local_irq_save(flags); \ + } while (0) +# define raw_local_irq_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + local_irq_restore(flags); \ + } while (0) #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -89,7 +105,11 @@ raw_safe_halt(); \ } while (0) -#define local_save_flags(flags) raw_local_save_flags(flags) +#define local_save_flags(flags) \ + do { \ + typecheck(unsigned long, flags); \ + raw_local_save_flags(flags); \ + } while (0) #define irqs_disabled() \ ({ \ @@ -99,7 +119,11 @@ raw_irqs_disabled_flags(_flags); \ }) -#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +#define irqs_disabled_flags(flags) \ +({ \ + typecheck(unsigned long, flags); \ + raw_irqs_disabled_flags(flags); \ +}) #endif /* CONFIG_X86 */ #endif diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index 6b563cae23d..da720bc3eb1 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h @@ -7,9 +7,6 @@ * * For licensing information, see the file 'LICENCE' in the * jffs2 directory. 
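Back in the irqflags.h hunk above, local_irq_save(), local_irq_restore(), local_save_flags() and irqs_disabled_flags() now run their flags argument through typecheck(unsigned long, flags), so passing an int produces a compile-time warning instead of working by accident. The enforced pattern, as a tiny sketch:

#include <linux/irqflags.h>

static void critical_section_example(void)
{
        unsigned long flags;            /* must be unsigned long, not e.g. int */

        local_irq_save(flags);          /* typecheck() warns on any other type */
        /* ... short critical section with local interrupts disabled ... */
        local_irq_restore(flags);
}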
- * - * $Id: jffs2.h,v 1.38 2005/09/26 11:37:23 havasi Exp $ - * */ #ifndef __LINUX_JFFS2_H__ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 00c1801099f..b9614488744 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -6,6 +6,7 @@ #define _LINUX_KALLSYMS_H #include <linux/errno.h> +#include <linux/kernel.h> #include <linux/stddef.h> #define KSYM_NAME_LEN 128 @@ -105,18 +106,9 @@ static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) print_symbol(fmt, (unsigned long)addr); } -#ifndef CONFIG_64BIT -#define print_ip_sym(ip) \ -do { \ - printk("[<%08lx>]", ip); \ - print_symbol(" %s\n", ip); \ -} while(0) -#else -#define print_ip_sym(ip) \ -do { \ - printk("[<%016lx>]", ip); \ - print_symbol(" %s\n", ip); \ -} while(0) -#endif +static inline void print_ip_sym(unsigned long ip) +{ + printk("[<%p>] %pS\n", (void *) ip, (void *) ip); +} #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index f9cd7a513f9..2651f805ba6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -14,6 +14,8 @@ #include <linux/compiler.h> #include <linux/bitops.h> #include <linux/log2.h> +#include <linux/typecheck.h> +#include <linux/ratelimit.h> #include <asm/byteorder.h> #include <asm/bug.h> @@ -73,6 +75,12 @@ extern const char linux_proc_banner[]; */ #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ #define KERN_CRIT "<2>" /* critical conditions */ @@ -100,6 +108,13 @@ struct completion; struct pt_regs; struct user; +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif + /** * might_sleep - annotation for functions that can sleep * @@ -110,13 +125,6 @@ struct user; * be bitten later when the calling function happens to sleep when it is not * supposed to. */ -#ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() -#else -# define might_resched() do { } while (0) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP void __might_sleep(char *file, int line); # define might_sleep() \ @@ -188,11 +196,8 @@ asmlinkage int vprintk(const char *fmt, va_list args) asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))) __cold; -extern int printk_ratelimit_jiffies; -extern int printk_ratelimit_burst; +extern struct ratelimit_state printk_ratelimit_state; extern int printk_ratelimit(void); -extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst); -extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); #else @@ -203,8 +208,6 @@ static inline int printk(const char *s, ...) __attribute__ ((format (printf, 1, 2))); static inline int __cold printk(const char *s, ...) 
{ return 0; } static inline int printk_ratelimit(void) { return 0; } -static inline int __printk_ratelimit(int ratelimit_jiffies, \ - int ratelimit_burst) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ unsigned int interval_msec) \ { return false; } @@ -441,26 +444,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) -/* - * Check at compile time that something is of a particular type. - * Always evaluates to 1 so you may use it easily in comparisons. - */ -#define typecheck(type,x) \ -({ type __dummy; \ - typeof(x) __dummy2; \ - (void)(&__dummy == &__dummy2); \ - 1; \ -}) - -/* - * Check at compile time that 'function' is a certain type, or is a pointer - * to that type (needs to use typedef for the function type.) - */ -#define typecheck_fn(type,function) \ -({ typeof(type) __tmp = function; \ - (void)__tmp; \ -}) - struct sysinfo; extern int do_sysinfo(struct sysinfo *info); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 3265968cd2c..32110cede64 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -83,6 +83,7 @@ struct kimage { unsigned long start; struct page *control_code_page; + struct page *swap_page; unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -98,18 +99,20 @@ struct kimage { unsigned int type : 1; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 + unsigned int preserve_context : 1; }; /* kexec interface functions */ -extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET; +extern void machine_kexec(struct kimage *image); extern int machine_kexec_prepare(struct kimage *image); extern void machine_kexec_cleanup(struct kimage *image); extern asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, unsigned long flags); +extern int kernel_kexec(void); #ifdef CONFIG_COMPAT extern asmlinkage long compat_sys_kexec_load(unsigned long entry, unsigned long nr_segments, @@ -127,8 +130,8 @@ void vmcoreinfo_append_str(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); unsigned long paddr_vmcoreinfo_note(void); -#define VMCOREINFO_OSRELEASE(name) \ - vmcoreinfo_append_str("OSRELEASE=%s\n", #name) +#define VMCOREINFO_OSRELEASE(value) \ + vmcoreinfo_append_str("OSRELEASE=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -156,8 +159,9 @@ extern struct kimage *kexec_crash_image; #define kexec_flush_icache_page(page) #endif -#define KEXEC_ON_CRASH 0x00000001 -#define KEXEC_ARCH_MASK 0xffff0000 +#define KEXEC_ON_CRASH 0x00000001 +#define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_ARCH_MASK 0xffff0000 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. 
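kexec.h above adds a preserve_context bit to struct kimage and a matching KEXEC_PRESERVE_CONTEXT load flag; the next hunk only admits that flag into KEXEC_FLAGS when CONFIG_KEXEC_JUMP is enabled. A rough sketch of how a flag check can be written against these masks (the real check lives in kernel/kexec.c; this is illustrative):

#include <linux/errno.h>
#include <linux/kexec.h>

static int check_kexec_flags(unsigned long flags)
{
        /* Anything that is neither a known flag nor an arch identifier bit
         * is rejected. */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
                return -EINVAL;
        return 0;
}

Separately, earlier in this kexec.h diff, VMCOREINFO_OSRELEASE() now prints its argument's run-time value instead of stringifying the token it was given.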
@@ -174,7 +178,12 @@ extern struct kimage *kexec_crash_image; #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) -#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */ +/* List of defined/legal kexec flags */ +#ifndef CONFIG_KEXEC_JUMP +#define KEXEC_FLAGS KEXEC_ON_CRASH +#else +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) +#endif #define VMCOREINFO_BYTES (4096) #define VMCOREINFO_NOTE_NAME "VMCOREINFO" diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 0509c4ce485..a1a91577813 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -19,6 +19,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/gfp.h> #include <linux/stddef.h> #include <linux/errno.h> #include <linux/compiler.h> @@ -41,8 +42,8 @@ struct file; struct subprocess_info; /* Allocate a subprocess_info structure */ -struct subprocess_info *call_usermodehelper_setup(char *path, - char **argv, char **envp); +struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, + char **envp, gfp_t gfp_mask); /* Set various pieces of state into the subprocess_info structure */ void call_usermodehelper_setkeys(struct subprocess_info *info, @@ -69,8 +70,9 @@ static inline int call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) { struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; - info = call_usermodehelper_setup(path, argv, envp); + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == NULL) return -ENOMEM; return call_usermodehelper_exec(info, wait); @@ -81,8 +83,9 @@ call_usermodehelper_keys(char *path, char **argv, char **envp, struct key *session_keyring, enum umh_wait wait) { struct subprocess_info *info; + gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? 
GFP_ATOMIC : GFP_KERNEL; - info = call_usermodehelper_setup(path, argv, envp); + info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == NULL) return -ENOMEM; diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 60f0d418ae3..5437ac0276e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -186,6 +186,8 @@ extern struct kobject *kset_find_obj(struct kset *, const char *); /* The global /sys/kernel/ kobject for people to chain off of */ extern struct kobject *kernel_kobj; +/* The global /sys/kernel/mm/ kobject for people to chain off of */ +extern struct kobject *mm_kobj; /* The global /sys/hypervisor/ kobject for people to chain off of */ extern struct kobject *hypervisor_kobj; /* The global /sys/power/ kobject for people to chain off of */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04a3556bdea..0be7795655f 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -157,11 +157,10 @@ struct kretprobe { int nmissed; size_t data_size; struct hlist_head free_instances; - struct hlist_head used_instances; + spinlock_t lock; }; struct kretprobe_instance { - struct hlist_node uflist; /* either on free list or used list */ struct hlist_node hlist; struct kretprobe *rp; kprobe_opcode_t *ret_addr; @@ -201,7 +200,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern spinlock_t kretprobe_lock; extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); @@ -214,6 +212,9 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); +void kretprobe_hash_lock(struct task_struct *tsk, + struct hlist_head **head, unsigned long *flags); +void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags); struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk); /* kprobe_running() will just return the current_kprobe on this CPU */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 00dd957e245..aabc8a13ba7 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -6,7 +6,8 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), void *data, - const char namefmt[], ...); + const char namefmt[], ...) + __attribute__((format(printf, 3, 4))); /** * kthread_run - create and wake a thread. diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 0ea064cbfbc..69511f74f91 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -371,6 +371,7 @@ struct kvm_trace_rec { #define KVM_CAP_PV_MMU 13 #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 +#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ /* * ioctls for VM fds diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 07d68a8ae8e..8525afc5310 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -121,6 +121,12 @@ struct kvm { struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif + +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; + unsigned long mmu_notifier_seq; + long mmu_notifier_count; +#endif }; /* The guest did something we don't support. 
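The new mmu_notifier_seq / mmu_notifier_count fields in struct kvm feed the mmu_notifier_retry() helper added a little further down. Very roughly, an architecture's fault path uses them like this (a loose sketch only; kvm->mmu_lock and the actual fault handling belong to the arch MMU code and are not shown in this diff):

static void fault_path_sketch(struct kvm_vcpu *vcpu)
{
        unsigned long mmu_seq;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();                              /* pairs with the invalidate side */

        /* ... resolve the guest fault; this may sleep and may race with an
         * mmu notifier invalidate ... */

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq)) {
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;                         /* an invalidate ran; redo the fault */
        }
        /* ... safe to install the mapping while mmu_lock is held ... */
        spin_unlock(&vcpu->kvm->mmu_lock);
}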
*/ @@ -332,4 +338,22 @@ int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) #define kvm_trace_cleanup() ((void)0) #endif +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) +{ + if (unlikely(vcpu->kvm->mmu_notifier_count)) + return 1; + /* + * Both reads happen under the mmu_lock and both values are + * modified under mmu_lock, so there's no need of smb_rmb() + * here in between, otherwise mmu_notifier_count should be + * read before mmu_notifier_seq, see + * mmu_notifier_invalidate_range_end write side. + */ + if (vcpu->kvm->mmu_notifier_seq != mmu_seq) + return 1; + return 0; +} +#endif + #endif diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 1d379787f2e..173febac665 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -47,7 +47,7 @@ struct lcd_ops { int (*set_contrast)(struct lcd_device *, int contrast); /* Check if given framebuffer device is the one LCD is bound to; return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ - int (*check_fb)(struct fb_info *); + int (*check_fb)(struct lcd_device *, struct fb_info *); }; struct lcd_device { diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h new file mode 100644 index 00000000000..81b4207deb9 --- /dev/null +++ b/include/linux/leds-pca9532.h @@ -0,0 +1,45 @@ +/* + * pca9532.h - platform data structure for pca9532 led controller + * + * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * + */ + +#ifndef __LINUX_PCA9532_H +#define __LINUX_PCA9532_H + +#include <linux/leds.h> + +enum pca9532_state { + PCA9532_OFF = 0x0, + PCA9532_ON = 0x1, + PCA9532_PWM0 = 0x2, + PCA9532_PWM1 = 0x3 +}; + +enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED, + PCA9532_TYPE_N2100_BEEP }; + +struct pca9532_led { + u8 id; + struct i2c_client *client; + char *name; + struct led_classdev ldev; + enum pca9532_type type; + enum pca9532_state state; +}; + +struct pca9532_platform_data { + struct pca9532_led leds[16]; + u8 pwm[2]; + u8 psc[2]; +}; + +#endif /* __LINUX_PCA9532_H */ + diff --git a/include/linux/leds.h b/include/linux/leds.h index 519df72e939..d41ccb56146 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -48,7 +48,7 @@ struct led_classdev { struct device *dev; struct list_head node; /* LED Device list */ - char *default_trigger; /* Trigger to use */ + const char *default_trigger; /* Trigger to use */ #ifdef CONFIG_LEDS_TRIGGERS /* Protects the trigger data below */ @@ -118,6 +118,20 @@ extern void ledtrig_ide_activity(void); #define ledtrig_ide_activity() do {} while(0) #endif +/* + * Generic LED platform data for describing LED names and default triggers. + */ +struct led_info { + const char *name; + char *default_trigger; + int flags; +}; + +struct led_platform_data { + int num_leds; + struct led_info *leds; +}; + /* For the leds-gpio driver */ struct gpio_led { const char *name; diff --git a/include/linux/libata.h b/include/linux/libata.h index 5b247b8a6b3..06b80337303 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -60,9 +60,9 @@ /* note: prints function name for you */ #ifdef ATA_DEBUG -#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define DPRINTK(fmt, args...) 
printk(KERN_ERR "%s: " fmt, __func__, ## args) #ifdef ATA_VERBOSE_DEBUG -#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) #else #define VPRINTK(fmt, args...) #endif /* ATA_VERBOSE_DEBUG */ @@ -71,7 +71,7 @@ #define VPRINTK(fmt, args...) #endif /* ATA_DEBUG */ -#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 @@ -750,6 +750,7 @@ struct ata_port_operations { void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); + unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); void (*dev_config)(struct ata_device *dev); @@ -951,6 +952,8 @@ extern void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); +extern unsigned int ata_do_dev_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id); extern void ata_qc_complete(struct ata_queued_cmd *qc); extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, diff --git a/include/linux/list.h b/include/linux/list.h index 139ec41d9c2..db35ef02e74 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -61,14 +61,10 @@ extern void __list_add(struct list_head *new, * Insert a new entry after the specified head. * This is good for implementing stacks. */ -#ifndef CONFIG_DEBUG_LIST static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } -#else -extern void list_add(struct list_head *new, struct list_head *head); -#endif /** @@ -218,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) return !list_empty(head) && (head->next == head->prev); } +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. 
+ * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + static inline void __list_splice(const struct list_head *list, - struct list_head *head) + struct list_head *prev, + struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - first->prev = head; - head->next = first; + first->prev = prev; + prev->next = first; - last->next = at; - at->prev = last; + last->next = next; + next->prev = last; } /** - * list_splice - join two lists + * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ @@ -241,7 +277,19 @@ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) - __list_splice(list, head); + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); } /** @@ -255,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { - __list_splice(list, head); + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2486eb4edbf..331e5f1c2d8 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -89,6 +89,7 @@ struct lock_class { struct lockdep_subclass_key *key; unsigned int subclass; + unsigned int dep_gen_id; /* * IRQ/softirq usage tracking bits: @@ -189,6 +190,14 @@ struct lock_chain { u64 chain_key; }; +#define MAX_LOCKDEP_KEYS_BITS 13 +/* + * Subtract one because we offset hlock->class_idx by 1 in order + * to make 0 mean no class. This avoids overflowing the class_idx + * bitfield and hitting the BUG in hlock_class(). + */ +#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1) + struct held_lock { /* * One-way hash of the dependency chain up to this point. 
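The list.h additions earlier on this page, list_cut_position() and the list_splice_tail*() variants, make it cheap to detach work from a shared queue while preserving order. A small sketch with hypothetical queue and lock names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_queue {                       /* hypothetical producer queue */
        spinlock_t lock;
        struct list_head pending;
};

static void drain_queue(struct my_queue *q)
{
        LIST_HEAD(local);

        spin_lock(&q->lock);
        list_splice_tail_init(&q->pending, &local);   /* keeps FIFO order and
                                                        * reinitialises q->pending */
        spin_unlock(&q->lock);

        /* process the entries on 'local' without holding q->lock ... */
}

To take only the front of the queue, up to and including a known entry, list_cut_position(&local, &q->pending, entry) does the split instead.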
We @@ -205,14 +214,14 @@ struct held_lock { * with zero), here we store the previous hash value: */ u64 prev_chain_key; - struct lock_class *class; unsigned long acquire_ip; struct lockdep_map *instance; - + struct lockdep_map *nest_lock; #ifdef CONFIG_LOCK_STAT u64 waittime_stamp; u64 holdtime_stamp; #endif + unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS; /* * The lock-stack is unified in that the lock chains of interrupt * contexts nest ontop of process context chains, but we 'separate' @@ -226,11 +235,11 @@ struct held_lock { * The following field is used to detect when we cross into an * interrupt context: */ - int irq_context; - int trylock; - int read; - int check; - int hardirqs_off; + unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ + unsigned int trylock:1; + unsigned int read:2; /* see lock_acquire() comment */ + unsigned int check:2; /* see lock_acquire() comment */ + unsigned int hardirqs_off:1; }; /* @@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, * 2: full validation */ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, - int trylock, int read, int check, unsigned long ip); + int trylock, int read, int check, + struct lockdep_map *nest_lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, + unsigned long ip); + # define INIT_LOCKDEP .lockdep_recursion = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -313,8 +326,9 @@ static inline void lockdep_on(void) { } -# define lock_acquire(l, s, t, r, c, i) do { } while (0) +# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) @@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) # else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define spin_release(l, n, i) lock_release(l, n, i) #else @@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) # else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i) +# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) # endif # define rwlock_release(l, n, i) lock_release(l, n, i) #else @@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct 
task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) # else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) +# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) # endif # define mutex_release(l, n, i) lock_release(l, n, i) #else @@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr) #ifdef CONFIG_DEBUG_LOCK_ALLOC # ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) # else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i) +# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) +# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) # endif # define rwsem_release(l, n, i) lock_release(l, n, i) #else @@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define rwsem_release(l, n, i) do { } while (0) #endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# ifdef CONFIG_PROVE_LOCKING +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) +# else +# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) +# endif +# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) +#else +# define lock_map_acquire(l) do { } while (0) +# define lock_map_release(l) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h new file mode 100644 index 00000000000..6b71d2dce50 --- /dev/null +++ b/include/linux/mISDNdsp.h @@ -0,0 +1,37 @@ +#ifndef __mISDNdsp_H__ +#define __mISDNdsp_H__ + +struct mISDN_dsp_element_arg { + char *name; + char *def; + char *desc; +}; + +struct mISDN_dsp_element { + char *name; + void *(*new)(const char *arg); + void (*free)(void *p); + void (*process_tx)(void *p, unsigned char *data, int len); + void (*process_rx)(void *p, unsigned char *data, int len); + int num_args; + struct mISDN_dsp_element_arg + *args; +}; + +extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem); +extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); + +struct dsp_features { + int hfc_id; /* unique id to identify the chip (or -1) */ + int hfc_dtmf; /* set if HFCmulti card supports dtmf */ + int hfc_loops; /* set if card supports tone loops */ + int hfc_echocanhw; /* set if card supports echocancelation*/ + int pcm_id; /* unique id to identify the pcm bus (or -1) */ + int pcm_slots; /* number of slots on the pcm bus */ + int pcm_banks; /* number of IO banks of pcm bus */ + int unclocked; /* data is not clocked (has jitter/loss) */ + int unordered; /* data is unordered (packets have index) */ +}; + +#endif + diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h new file mode 100644 index 00000000000..e794dfb8750 --- /dev/null +++ b/include/linux/mISDNhw.h @@ -0,0 +1,193 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Basic declarations for the mISDN HW channels + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the 
terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef MISDNHW_H +#define MISDNHW_H +#include <linux/mISDNif.h> +#include <linux/timer.h> + +/* + * HW DEBUG 0xHHHHGGGG + * H - hardware driver specific bits + * G - for all drivers + */ + +#define DEBUG_HW 0x00000001 +#define DEBUG_HW_OPEN 0x00000002 +#define DEBUG_HW_DCHANNEL 0x00000100 +#define DEBUG_HW_DFIFO 0x00000200 +#define DEBUG_HW_BCHANNEL 0x00001000 +#define DEBUG_HW_BFIFO 0x00002000 + +#define MAX_DFRAME_LEN_L1 300 +#define MAX_MON_FRAME 32 +#define MAX_LOG_SPACE 2048 +#define MISDN_COPY_SIZE 32 + +/* channel->Flags bit field */ +#define FLG_TX_BUSY 0 /* tx_buf in use */ +#define FLG_TX_NEXT 1 /* next_skb in use */ +#define FLG_L1_BUSY 2 /* L1 is permanent busy */ +#define FLG_L2_ACTIVATED 3 /* activated from L2 */ +#define FLG_OPEN 5 /* channel is in use */ +#define FLG_ACTIVE 6 /* channel is activated */ +#define FLG_BUSY_TIMER 7 +/* channel type */ +#define FLG_DCHANNEL 8 /* channel is D-channel */ +#define FLG_BCHANNEL 9 /* channel is B-channel */ +#define FLG_ECHANNEL 10 /* channel is E-channel */ +#define FLG_TRANSPARENT 12 /* channel use transparent data */ +#define FLG_HDLC 13 /* channel use hdlc data */ +#define FLG_L2DATA 14 /* channel use L2 DATA primitivs */ +#define FLG_ORIGIN 15 /* channel is on origin site */ +/* channel specific stuff */ +/* arcofi specific */ +#define FLG_ARCOFI_TIMER 16 +#define FLG_ARCOFI_ERROR 17 +/* isar specific */ +#define FLG_INITIALIZED 16 +#define FLG_DLEETX 17 +#define FLG_LASTDLE 18 +#define FLG_FIRST 19 +#define FLG_LASTDATA 20 +#define FLG_NMD_DATA 21 +#define FLG_FTI_RUN 22 +#define FLG_LL_OK 23 +#define FLG_LL_CONN 24 +#define FLG_DTMFSEND 25 + +/* workq events */ +#define FLG_RECVQUEUE 30 +#define FLG_PHCHANGE 31 + +#define schedule_event(s, ev) do { \ + test_and_set_bit(ev, &((s)->Flags)); \ + schedule_work(&((s)->workq)); \ + } while (0) + +struct dchannel { + struct mISDNdevice dev; + u_long Flags; + struct work_struct workq; + void (*phfunc) (struct dchannel *); + u_int state; + void *l1; + /* HW access */ + u_char (*read_reg) (void *, u_char); + void (*write_reg) (void *, u_char, u_char); + void (*read_fifo) (void *, u_char *, int); + void (*write_fifo) (void *, u_char *, int); + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff_head squeue; + struct sk_buff_head rqueue; + struct sk_buff *tx_skb; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +typedef int (dchannel_l1callback)(struct dchannel *, u_int); +extern int create_l1(struct dchannel *, dchannel_l1callback *); + +/* private L1 commands */ +#define INFO0 0x8002 +#define INFO1 0x8102 +#define INFO2 0x8202 +#define INFO3_P8 0x8302 +#define INFO3_P10 0x8402 +#define INFO4_P8 0x8502 +#define INFO4_P10 0x8602 +#define LOSTFRAMING 0x8702 +#define ANYSIGNAL 0x8802 +#define HW_POWERDOWN 0x8902 +#define HW_RESET_REQ 0x8a02 +#define HW_POWERUP_REQ 0x8b02 +#define HW_DEACT_REQ 0x8c02 +#define HW_ACTIVATE_REQ 0x8e02 +#define HW_D_NOBLOCKED 0x8f02 +#define HW_RESET_IND 0x9002 +#define HW_POWERUP_IND 0x9102 +#define HW_DEACT_IND 0x9202 +#define 
HW_ACTIVATE_IND 0x9302 +#define HW_DEACT_CNF 0x9402 +#define HW_TESTLOOP 0x9502 +#define HW_TESTRX_RAW 0x9602 +#define HW_TESTRX_HDLC 0x9702 +#define HW_TESTRX_OFF 0x9802 + +struct layer1; +extern int l1_event(struct layer1 *, u_int); + + +struct bchannel { + struct mISDNchannel ch; + int nr; + u_long Flags; + struct work_struct workq; + u_int state; + /* HW access */ + u_char (*read_reg) (void *, u_char); + void (*write_reg) (void *, u_char, u_char); + void (*read_fifo) (void *, u_char *, int); + void (*write_fifo) (void *, u_char *, int); + void *hw; + int slot; /* multiport card channel slot */ + struct timer_list timer; + /* receive data */ + struct sk_buff *rx_skb; + int maxlen; + /* send data */ + struct sk_buff *next_skb; + struct sk_buff *tx_skb; + struct sk_buff_head rqueue; + int rcount; + int tx_idx; + int debug; + /* statistics */ + int err_crc; + int err_tx; + int err_rx; +}; + +extern int mISDN_initdchannel(struct dchannel *, int, void *); +extern int mISDN_initbchannel(struct bchannel *, int); +extern int mISDN_freedchannel(struct dchannel *); +extern int mISDN_freebchannel(struct bchannel *); +extern void queue_ch_frame(struct mISDNchannel *, u_int, + int, struct sk_buff *); +extern int dchannel_senddata(struct dchannel *, struct sk_buff *); +extern int bchannel_senddata(struct bchannel *, struct sk_buff *); +extern void recv_Dchannel(struct dchannel *); +extern void recv_Bchannel(struct bchannel *); +extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); +extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); +extern void confirm_Bsend(struct bchannel *bch); +extern int get_next_bframe(struct bchannel *); +extern int get_next_dframe(struct dchannel *); + +#endif diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h new file mode 100644 index 00000000000..8f2d60da04e --- /dev/null +++ b/include/linux/mISDNif.h @@ -0,0 +1,509 @@ +/* + * + * Author Karsten Keil <kkeil@novell.com> + * + * Copyright 2008 by Karsten Keil <kkeil@novell.com> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE + * version 2.1 as published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU LESSER GENERAL PUBLIC LICENSE for more details. 
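Returning to mISDNhw.h above: the schedule_event() macro is how these channel structures hand deferred work to their work_struct, typically from an interrupt handler once a frame has been queued. A one-line sketch, with dch assumed to be a driver's struct dchannel pointer:

        /* Sets FLG_RECVQUEUE in dch->Flags and schedules dch->workq, exactly
         * as the macro definition above expands to. */
        schedule_event(dch, FLG_RECVQUEUE);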
+ * + */ + +#ifndef mISDNIF_H +#define mISDNIF_H + +#include <stdarg.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/socket.h> + +/* + * ABI Version 32 bit + * + * <8 bit> Major version + * - changed if any interface become backwards incompatible + * + * <8 bit> Minor version + * - changed if any interface is extended but backwards compatible + * + * <16 bit> Release number + * - should be incremented on every checkin + */ +#define MISDN_MAJOR_VERSION 1 +#define MISDN_MINOR_VERSION 0 +#define MISDN_RELEASE 19 + +/* primitives for information exchange + * generell format + * <16 bit 0 > + * <8 bit command> + * BIT 8 = 1 LAYER private + * BIT 7 = 1 answer + * BIT 6 = 1 DATA + * <8 bit target layer mask> + * + * Layer = 00 is reserved for general commands + Layer = 01 L2 -> HW + Layer = 02 HW -> L2 + Layer = 04 L3 -> L2 + Layer = 08 L2 -> L3 + * Layer = FF is reserved for broadcast commands + */ + +#define MISDN_CMDMASK 0xff00 +#define MISDN_LAYERMASK 0x00ff + +/* generell commands */ +#define OPEN_CHANNEL 0x0100 +#define CLOSE_CHANNEL 0x0200 +#define CONTROL_CHANNEL 0x0300 +#define CHECK_DATA 0x0400 + +/* layer 2 -> layer 1 */ +#define PH_ACTIVATE_REQ 0x0101 +#define PH_DEACTIVATE_REQ 0x0201 +#define PH_DATA_REQ 0x2001 +#define MPH_ACTIVATE_REQ 0x0501 +#define MPH_DEACTIVATE_REQ 0x0601 +#define MPH_INFORMATION_REQ 0x0701 +#define PH_CONTROL_REQ 0x0801 + +/* layer 1 -> layer 2 */ +#define PH_ACTIVATE_IND 0x0102 +#define PH_ACTIVATE_CNF 0x4102 +#define PH_DEACTIVATE_IND 0x0202 +#define PH_DEACTIVATE_CNF 0x4202 +#define PH_DATA_IND 0x2002 +#define MPH_ACTIVATE_IND 0x0502 +#define MPH_DEACTIVATE_IND 0x0602 +#define MPH_INFORMATION_IND 0x0702 +#define PH_DATA_CNF 0x6002 +#define PH_CONTROL_IND 0x0802 +#define PH_CONTROL_CNF 0x4802 + +/* layer 3 -> layer 2 */ +#define DL_ESTABLISH_REQ 0x1004 +#define DL_RELEASE_REQ 0x1104 +#define DL_DATA_REQ 0x3004 +#define DL_UNITDATA_REQ 0x3104 +#define DL_INFORMATION_REQ 0x0004 + +/* layer 2 -> layer 3 */ +#define DL_ESTABLISH_IND 0x1008 +#define DL_ESTABLISH_CNF 0x5008 +#define DL_RELEASE_IND 0x1108 +#define DL_RELEASE_CNF 0x5108 +#define DL_DATA_IND 0x3008 +#define DL_UNITDATA_IND 0x3108 +#define DL_INFORMATION_IND 0x0008 + +/* intern layer 2 managment */ +#define MDL_ASSIGN_REQ 0x1804 +#define MDL_ASSIGN_IND 0x1904 +#define MDL_REMOVE_REQ 0x1A04 +#define MDL_REMOVE_IND 0x1B04 +#define MDL_STATUS_UP_IND 0x1C04 +#define MDL_STATUS_DOWN_IND 0x1D04 +#define MDL_STATUS_UI_IND 0x1E04 +#define MDL_ERROR_IND 0x1F04 +#define MDL_ERROR_RSP 0x5F04 + +/* DL_INFORMATION_IND types */ +#define DL_INFO_L2_CONNECT 0x0001 +#define DL_INFO_L2_REMOVED 0x0002 + +/* PH_CONTROL types */ +/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */ +#define DTMF_TONE_VAL 0x2000 +#define DTMF_TONE_MASK 0x007F +#define DTMF_TONE_START 0x2100 +#define DTMF_TONE_STOP 0x2200 +#define DTMF_HFC_COEF 0x4000 +#define DSP_CONF_JOIN 0x2403 +#define DSP_CONF_SPLIT 0x2404 +#define DSP_RECEIVE_OFF 0x2405 +#define DSP_RECEIVE_ON 0x2406 +#define DSP_ECHO_ON 0x2407 +#define DSP_ECHO_OFF 0x2408 +#define DSP_MIX_ON 0x2409 +#define DSP_MIX_OFF 0x240a +#define DSP_DELAY 0x240b +#define DSP_JITTER 0x240c +#define DSP_TXDATA_ON 0x240d +#define DSP_TXDATA_OFF 0x240e +#define DSP_TX_DEJITTER 0x240f +#define DSP_TX_DEJ_OFF 0x2410 +#define DSP_TONE_PATT_ON 0x2411 +#define DSP_TONE_PATT_OFF 0x2412 +#define DSP_VOL_CHANGE_TX 0x2413 +#define DSP_VOL_CHANGE_RX 0x2414 +#define DSP_BF_ENABLE_KEY 0x2415 +#define DSP_BF_DISABLE 0x2416 +#define DSP_BF_ACCEPT 0x2416 +#define DSP_BF_REJECT 
0x2417 +#define DSP_PIPELINE_CFG 0x2418 +#define HFC_VOL_CHANGE_TX 0x2601 +#define HFC_VOL_CHANGE_RX 0x2602 +#define HFC_SPL_LOOP_ON 0x2603 +#define HFC_SPL_LOOP_OFF 0x2604 + +/* DSP_TONE_PATT_ON parameter */ +#define TONE_OFF 0x0000 +#define TONE_GERMAN_DIALTONE 0x0001 +#define TONE_GERMAN_OLDDIALTONE 0x0002 +#define TONE_AMERICAN_DIALTONE 0x0003 +#define TONE_GERMAN_DIALPBX 0x0004 +#define TONE_GERMAN_OLDDIALPBX 0x0005 +#define TONE_AMERICAN_DIALPBX 0x0006 +#define TONE_GERMAN_RINGING 0x0007 +#define TONE_GERMAN_OLDRINGING 0x0008 +#define TONE_AMERICAN_RINGPBX 0x000b +#define TONE_GERMAN_RINGPBX 0x000c +#define TONE_GERMAN_OLDRINGPBX 0x000d +#define TONE_AMERICAN_RINGING 0x000e +#define TONE_GERMAN_BUSY 0x000f +#define TONE_GERMAN_OLDBUSY 0x0010 +#define TONE_AMERICAN_BUSY 0x0011 +#define TONE_GERMAN_HANGUP 0x0012 +#define TONE_GERMAN_OLDHANGUP 0x0013 +#define TONE_AMERICAN_HANGUP 0x0014 +#define TONE_SPECIAL_INFO 0x0015 +#define TONE_GERMAN_GASSENBESETZT 0x0016 +#define TONE_GERMAN_AUFSCHALTTON 0x0016 + +/* MPH_INFORMATION_IND */ +#define L1_SIGNAL_LOS_OFF 0x0010 +#define L1_SIGNAL_LOS_ON 0x0011 +#define L1_SIGNAL_AIS_OFF 0x0012 +#define L1_SIGNAL_AIS_ON 0x0013 +#define L1_SIGNAL_RDI_OFF 0x0014 +#define L1_SIGNAL_RDI_ON 0x0015 +#define L1_SIGNAL_SLIP_RX 0x0020 +#define L1_SIGNAL_SLIP_TX 0x0021 + +/* + * protocol ids + * D channel 1-31 + * B channel 33 - 63 + */ + +#define ISDN_P_NONE 0 +#define ISDN_P_BASE 0 +#define ISDN_P_TE_S0 0x01 +#define ISDN_P_NT_S0 0x02 +#define ISDN_P_TE_E1 0x03 +#define ISDN_P_NT_E1 0x04 +#define ISDN_P_LAPD_TE 0x10 +#define ISDN_P_LAPD_NT 0x11 + +#define ISDN_P_B_MASK 0x1f +#define ISDN_P_B_START 0x20 + +#define ISDN_P_B_RAW 0x21 +#define ISDN_P_B_HDLC 0x22 +#define ISDN_P_B_X75SLP 0x23 +#define ISDN_P_B_L2DTMF 0x24 +#define ISDN_P_B_L2DSP 0x25 +#define ISDN_P_B_L2DSPHDLC 0x26 + +#define OPTION_L2_PMX 1 +#define OPTION_L2_PTP 2 +#define OPTION_L2_FIXEDTEI 3 +#define OPTION_L2_CLEANUP 4 + +/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ +#define MISDN_MAX_IDLEN 20 + +struct mISDNhead { + unsigned int prim; + unsigned int id; +} __attribute__((packed)); + +#define MISDN_HEADER_LEN sizeof(struct mISDNhead) +#define MAX_DATA_SIZE 2048 +#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN) +#define MAX_DFRAME_LEN 260 + +#define MISDN_ID_ADDR_MASK 0xFFFF +#define MISDN_ID_TEI_MASK 0xFF00 +#define MISDN_ID_SAPI_MASK 0x00FF +#define MISDN_ID_TEI_ANY 0x7F00 + +#define MISDN_ID_ANY 0xFFFF +#define MISDN_ID_NONE 0xFFFE + +#define GROUP_TEI 127 +#define TEI_SAPI 63 +#define CTRL_SAPI 0 + +#define MISDN_MAX_CHANNEL 127 +#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3) + +#define SOL_MISDN 0 + +struct sockaddr_mISDN { + sa_family_t family; + unsigned char dev; + unsigned char channel; + unsigned char sapi; + unsigned char tei; +}; + +/* timer device ioctl */ +#define IMADDTIMER _IOR('I', 64, int) +#define IMDELTIMER _IOR('I', 65, int) +/* socket ioctls */ +#define IMGETVERSION _IOR('I', 66, int) +#define IMGETCOUNT _IOR('I', 67, int) +#define IMGETDEVINFO _IOR('I', 68, int) +#define IMCTRLREQ _IOR('I', 69, int) +#define IMCLEAR_L2 _IOR('I', 70, int) + +struct mISDNversion { + unsigned char major; + unsigned char minor; + unsigned short release; +}; + +struct mISDN_devinfo { + u_int id; + u_int Dprotocols; + u_int Bprotocols; + u_int protocol; + u_char channelmap[MISDN_CHMAP_SIZE]; + u_int nrbchan; + char name[MISDN_MAX_IDLEN]; +}; + +static inline int +test_channelmap(u_int nr, u_char *map) +{ + if (nr <= MISDN_MAX_CHANNEL) + return map[nr >> 3] & (1 << 
(nr & 7)); + else + return 0; +} + +static inline void +set_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] |= (1 << (nr & 7)); +} + +static inline void +clear_channelmap(u_int nr, u_char *map) +{ + map[nr >> 3] &= ~(1 << (nr & 7)); +} + +/* CONTROL_CHANNEL parameters */ +#define MISDN_CTRL_GETOP 0x0000 +#define MISDN_CTRL_LOOP 0x0001 +#define MISDN_CTRL_CONNECT 0x0002 +#define MISDN_CTRL_DISCONNECT 0x0004 +#define MISDN_CTRL_PCMCONNECT 0x0010 +#define MISDN_CTRL_PCMDISCONNECT 0x0020 +#define MISDN_CTRL_SETPEER 0x0040 +#define MISDN_CTRL_UNSETPEER 0x0080 +#define MISDN_CTRL_RX_OFF 0x0100 +#define MISDN_CTRL_HW_FEATURES_OP 0x2000 +#define MISDN_CTRL_HW_FEATURES 0x2001 +#define MISDN_CTRL_HFC_OP 0x4000 +#define MISDN_CTRL_HFC_PCM_CONN 0x4001 +#define MISDN_CTRL_HFC_PCM_DISC 0x4002 +#define MISDN_CTRL_HFC_CONF_JOIN 0x4003 +#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004 +#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005 +#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 +#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 +#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 + + +/* socket options */ +#define MISDN_TIME_STAMP 0x0001 + +struct mISDN_ctrl_req { + int op; + int channel; + int p1; + int p2; +}; + +/* muxer options */ +#define MISDN_OPT_ALL 1 +#define MISDN_OPT_TEIMGR 2 + +#ifdef __KERNEL__ +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/net.h> +#include <net/sock.h> +#include <linux/completion.h> + +#define DEBUG_CORE 0x000000ff +#define DEBUG_CORE_FUNC 0x00000002 +#define DEBUG_SOCKET 0x00000004 +#define DEBUG_MANAGER 0x00000008 +#define DEBUG_SEND_ERR 0x00000010 +#define DEBUG_MSG_THREAD 0x00000020 +#define DEBUG_QUEUE_FUNC 0x00000040 +#define DEBUG_L1 0x0000ff00 +#define DEBUG_L1_FSM 0x00000200 +#define DEBUG_L2 0x00ff0000 +#define DEBUG_L2_FSM 0x00020000 +#define DEBUG_L2_CTRL 0x00040000 +#define DEBUG_L2_RECV 0x00080000 +#define DEBUG_L2_TEI 0x00100000 +#define DEBUG_L2_TEIFSM 0x00200000 +#define DEBUG_TIMER 0x01000000 + +#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) +#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) +#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) + +/* socket states */ +#define MISDN_OPEN 1 +#define MISDN_BOUND 2 +#define MISDN_CLOSED 3 + +struct mISDNchannel; +struct mISDNdevice; +struct mISDNstack; + +struct channel_req { + u_int protocol; + struct sockaddr_mISDN adr; + struct mISDNchannel *ch; +}; + +typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); +typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); +typedef int (create_func_t)(struct channel_req *); + +struct Bprotocol { + struct list_head list; + char *name; + u_int Bprotocols; + create_func_t *create; +}; + +struct mISDNchannel { + struct list_head list; + u_int protocol; + u_int nr; + u_long opt; + u_int addr; + struct mISDNstack *st; + struct mISDNchannel *peer; + send_func_t *send; + send_func_t *recv; + ctrl_func_t *ctrl; +}; + +struct mISDN_sock_list { + struct hlist_head head; + rwlock_t lock; +}; + +struct mISDN_sock { + struct sock sk; + struct mISDNchannel ch; + u_int cmask; + struct mISDNdevice *dev; +}; + + + +struct mISDNdevice { + struct mISDNchannel D; + u_int id; + char name[MISDN_MAX_IDLEN]; + u_int Dprotocols; + u_int Bprotocols; + u_int nrbchan; + u_char channelmap[MISDN_CHMAP_SIZE]; + struct list_head bchannels; + struct mISDNchannel *teimgr; + struct device dev; +}; + +struct mISDNstack { + u_long status; + struct mISDNdevice *dev; + struct task_struct *thread; + struct completion *notify; + wait_queue_head_t workq; + struct 
sk_buff_head msgq; + struct list_head layer2; + struct mISDNchannel *layer1; + struct mISDNchannel own; + struct mutex lmutex; /* protect lists */ + struct mISDN_sock_list l1sock; +#ifdef MISDN_MSG_STATS + u_int msg_cnt; + u_int sleep_cnt; + u_int stopped_cnt; +#endif +}; + +/* global alloc/queue functions */ + +static inline struct sk_buff * +mI_alloc_skb(unsigned int len, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); + if (likely(skb)) + skb_reserve(skb, MISDN_HEADER_LEN); + return skb; +} + +static inline struct sk_buff * +_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); + struct mISDNhead *hh; + + if (!skb) + return NULL; + if (len) + memcpy(skb_put(skb, len), dp, len); + hh = mISDN_HEAD_P(skb); + hh->prim = prim; + hh->id = id; + return skb; +} + +static inline void +_queue_data(struct mISDNchannel *ch, u_int prim, + u_int id, u_int len, void *dp, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + if (!ch->peer) + return; + skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); + if (!skb) + return; + if (ch->recv(ch->peer, skb)) + dev_kfree_skb(skb); +} + +/* global register/unregister functions */ + +extern int mISDN_register_device(struct mISDNdevice *, char *name); +extern void mISDN_unregister_device(struct mISDNdevice *); +extern int mISDN_register_Bprotocol(struct Bprotocol *); +extern void mISDN_unregister_Bprotocol(struct Bprotocol *); + +extern void set_channel_address(struct mISDNchannel *, u_int, u_int); + +#endif /* __KERNEL__ */ +#endif /* mISDNIF_H */ diff --git a/include/linux/major.h b/include/linux/major.h index 0cb98053537..53d5fafd85c 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -53,7 +53,7 @@ #define STL_SIOMEMMAJOR 28 #define ACSI_MAJOR 28 #define AZTECH_CDROM_MAJOR 29 -#define GRAPHDEV_MAJOR 29 /* SparcLinux & Linux/68k /dev/fb */ +#define FB_MAJOR 29 /* /dev/fb* framebuffers */ #define CM206_CDROM_MAJOR 32 #define IDE2_MAJOR 33 #define IDE3_MAJOR 34 diff --git a/include/linux/maple.h b/include/linux/maple.h index d31e36ebb43..c23d3f51ba4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -2,6 +2,7 @@ #define __LINUX_MAPLE_H #include <linux/device.h> +#include <mach/maple.h> extern struct bus_type maple_bus_type; @@ -33,6 +34,7 @@ struct mapleq { void *sendbuf, *recvbuf, *recvbufdcsp; unsigned char length; enum maple_code command; + struct mutex mutex; }; struct maple_devinfo { @@ -49,7 +51,6 @@ struct maple_devinfo { struct maple_device { struct maple_driver *driver; struct mapleq *mq; - void *private_data; void (*callback) (struct mapleq * mq); unsigned long when, interval, function; struct maple_devinfo devinfo; @@ -61,8 +62,6 @@ struct maple_device { struct maple_driver { unsigned long function; - int (*connect) (struct maple_device * dev); - void (*disconnect) (struct maple_device * dev); struct device_driver drv; }; @@ -70,10 +69,17 @@ void maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq * mq), unsigned long interval, unsigned long function); -int maple_driver_register(struct device_driver *drv); -void maple_add_packet(struct mapleq *mq); +int maple_driver_register(struct maple_driver *); +void maple_driver_unregister(struct maple_driver *); + +int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, + u32 command, u32 length, void *data); +void maple_clear_dev(struct maple_device *mdev); #define to_maple_dev(n) container_of(n, struct maple_device, 
dev) #define to_maple_driver(n) container_of(n, struct maple_driver, drv) +#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev) +#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p)) + #endif /* __LINUX_MAPLE_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e6608776bc9..fdf3967e139 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -35,7 +35,10 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); extern void mem_cgroup_uncharge_page(struct page *page); +extern void mem_cgroup_uncharge_cache_page(struct page *page); extern void mem_cgroup_move_lists(struct page *page, bool active); +extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); + extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -50,9 +53,9 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); #define mm_match_cgroup(mm, cgroup) \ ((cgroup) == mem_cgroup_from_task((mm)->owner)) -extern int mem_cgroup_prepare_migration(struct page *page); +extern int +mem_cgroup_prepare_migration(struct page *page, struct page *newpage); extern void mem_cgroup_end_migration(struct page *page); -extern void mem_cgroup_page_migration(struct page *page, struct page *newpage); /* * For memory reclaim. @@ -97,6 +100,15 @@ static inline void mem_cgroup_uncharge_page(struct page *page) { } +static inline void mem_cgroup_uncharge_cache_page(struct page *page) +{ +} + +static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +{ + return 0; +} + static inline void mem_cgroup_move_lists(struct page *page, bool active) { } @@ -112,7 +124,8 @@ static inline int task_in_mem_cgroup(struct task_struct *task, return 1; } -static inline int mem_cgroup_prepare_migration(struct page *page) +static inline int +mem_cgroup_prepare_migration(struct page *page, struct page *newpage) { return 0; } @@ -121,11 +134,6 @@ static inline void mem_cgroup_end_migration(struct page *page) { } -static inline void -mem_cgroup_page_migration(struct page *page, struct page *newpage) -{ -} - static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) { return 0; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ea9f5ad9ec8..763ba81fc0f 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -13,12 +13,12 @@ struct mem_section; #ifdef CONFIG_MEMORY_HOTPLUG /* - * Magic number for free bootmem. + * Types for free bootmem. * The normal smallest mapcount is -1. Here is smaller value than it. 
*/ -#define SECTION_INFO 0xfffffffe -#define MIX_INFO 0xfffffffd -#define NODE_INFO 0xfffffffc +#define SECTION_INFO (-1 - 1) +#define MIX_SECTION_INFO (-1 - 2) +#define NODE_INFO (-1 - 3) /* * pgdat resizing functions @@ -199,6 +199,18 @@ extern int walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); +#ifdef CONFIG_MEMORY_HOTREMOVE + +extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); + +#else +static inline int is_mem_section_removable(unsigned long pfn, + unsigned long nr_pages) +{ + return 0; +} +#endif /* CONFIG_MEMORY_HOTREMOVE */ + extern int add_memory(int nid, u64 start, u64 size); extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3a39570b81b..085c903fe0f 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -59,6 +59,7 @@ enum { #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/nodemask.h> +#include <linux/pagemap.h> struct mm_struct; @@ -220,6 +221,24 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context); #endif + +/* Check if a vma is migratable */ +static inline int vma_migratable(struct vm_area_struct *vma) +{ + if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) + return 0; + /* + * Migration allocates pages in the highest zone. If we cannot + * do so then migration (at least from node to node) is not + * possible. + */ + if (vma->vm_file && + gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) + < policy_zone) + return 0; + return 1; +} + #else struct mempolicy {}; diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 37a5cdb0391..a9f998a3f48 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -263,6 +263,10 @@ struct memstick_dev { /* Get next request from the media driver. */ int (*next_request)(struct memstick_dev *card, struct memstick_request **mrq); + /* Tell the media driver to stop doing things */ + void (*stop)(struct memstick_dev *card); + /* Allow the media driver to continue */ + void (*start)(struct memstick_dev *card); struct device dev; }; @@ -284,7 +288,7 @@ struct memstick_host { /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); /* Set host IO parameters (power, clock, etc). */ - void (*set_param)(struct memstick_host *host, + int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); unsigned long private[0] ____cacheline_aligned; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h new file mode 100644 index 00000000000..49ef857cdb2 --- /dev/null +++ b/include/linux/mfd/core.h @@ -0,0 +1,55 @@ +/* + * drivers/mfd/mfd-core.h + * + * core MFD support + * Copyright (c) 2006 Ian Molton + * Copyright (c) 2007 Dmitry Baryshkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef MFD_CORE_H +#define MFD_CORE_H + +#include <linux/platform_device.h> + +/* + * This struct describes the MFD part ("cell"). 
+ * After registration the copy of this structure will become the platform data + * of the resulting platform_device + */ +struct mfd_cell { + const char *name; + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + /* driver-specific data for MFD-aware "cell" drivers */ + void *driver_data; + + /* platform_data can be used to either pass data to "generic" + driver or as a hook to mfd_cell for the "cell" drivers */ + void *platform_data; + size_t data_size; + + /* + * This resources can be specified relatievly to the parent device. + * For accessing device you should use resources from device + */ + int num_resources; + const struct resource *resources; +}; + +extern int mfd_add_devices(struct device *parent, int id, + const struct mfd_cell *cells, int n_devs, + struct resource *mem_base, + int irq_base); + +extern void mfd_remove_devices(struct device *parent); + +#endif diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h new file mode 100644 index 00000000000..e83c7f2036f --- /dev/null +++ b/include/linux/mfd/t7l66xb.h @@ -0,0 +1,36 @@ +/* + * This file contains the definitions for the T7L66XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef MFD_T7L66XB_H +#define MFD_T7L66XB_H + +#include <linux/mfd/core.h> +#include <linux/mfd/tmio.h> + +struct t7l66xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* The base for subdevice irqs */ + + struct tmio_nand_data *nand_data; +}; + + +#define IRQ_T7L66XB_MMC (1) +#define IRQ_T7L66XB_NAND (3) + +#define T7L66XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h new file mode 100644 index 00000000000..fa06e0610b8 --- /dev/null +++ b/include/linux/mfd/tc6387xb.h @@ -0,0 +1,23 @@ +/* + * This file contains the definitions for the TC6387XB + * + * (C) Copyright 2005 Ian Molton <spyro@f2s.com> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. 
+ * + */ +#ifndef MFD_TC6387XB_H +#define MFD_TC6387XB_H + +struct tc6387xb_platform_data { + int (*enable_clk32k)(struct platform_device *dev); + void (*disable_clk32k)(struct platform_device *dev); + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); +}; + +#endif diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h new file mode 100644 index 00000000000..fec7b3f7a81 --- /dev/null +++ b/include/linux/mfd/tc6393xb.h @@ -0,0 +1,46 @@ +/* + * Toshiba TC6393XB SoC support + * + * Copyright(c) 2005-2006 Chris Humbert + * Copyright(c) 2005 Dirk Opfer + * Copyright(c) 2005 Ian Molton <spyro@f2s.com> + * Copyright(c) 2007 Dmitry Baryshkov + * + * Based on code written by Sharp/Lineo for 2.4 kernels + * Based on locomo.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MFD_TC6393XB_H +#define MFD_TC6393XB_H + +/* Also one should provide the CK3P6MI clock */ +struct tc6393xb_platform_data { + u16 scr_pll2cr; /* PLL2 Control */ + u16 scr_gper; /* GP Enable */ + u32 scr_gpo_doecr; /* GPO Data OE Control */ + u32 scr_gpo_dsr; /* GPO Data Set */ + + int (*enable)(struct platform_device *dev); + int (*disable)(struct platform_device *dev); + int (*suspend)(struct platform_device *dev); + int (*resume)(struct platform_device *dev); + + int irq_base; /* base for subdevice irqs */ + int gpio_base; + + struct tmio_nand_data *nand_data; +}; + +/* + * Relative to irq_base + */ +#define IRQ_TC6393_NAND 0 +#define IRQ_TC6393_MMC 1 + +#define TC6393XB_NR_IRQS 8 + +#endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h new file mode 100644 index 00000000000..ec612e66391 --- /dev/null +++ b/include/linux/mfd/tmio.h @@ -0,0 +1,28 @@ +#ifndef MFD_TMIO_H +#define MFD_TMIO_H + +#define tmio_ioread8(addr) readb(addr) +#define tmio_ioread16(addr) readw(addr) +#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) +#define tmio_ioread32(addr) \ + (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16)) + +#define tmio_iowrite8(val, addr) writeb((val), (addr)) +#define tmio_iowrite16(val, addr) writew((val), (addr)) +#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) +#define tmio_iowrite32(val, addr) \ + do { \ + writew((val), (addr)); \ + writew((val) >> 16, (addr) + 2); \ + } while (0) + +/* + * data for the NAND controller + */ +struct tmio_nand_data { + struct nand_bbt_descr *badblock_pattern; + struct mtd_partition *partition; + unsigned int num_partitions; +}; + +#endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index e10a90a93b5..03aea612d28 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -3,28 +3,10 @@ #include <linux/mm.h> #include <linux/mempolicy.h> -#include <linux/pagemap.h> typedef struct page *new_page_t(struct page *, unsigned long private, int **); #ifdef CONFIG_MIGRATION -/* Check if a vma is migratable */ -static inline int vma_migratable(struct vm_area_struct *vma) -{ - if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED)) - return 0; - /* - * Migration allocates pages in the highest zone. If we cannot - * do so then migration (at least from node to node) is not - * possible. 
- */ - if (vma->vm_file && - gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) - < policy_zone) - return 0; - return 1; -} - extern int isolate_lru_page(struct page *p, struct list_head *pagelist); extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, @@ -39,9 +21,6 @@ extern int migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags); #else -static inline int vma_migratable(struct vm_area_struct *vma) - { return 0; } - static inline int isolate_lru_page(struct page *p, struct list_head *list) { return -ENOSYS; } static inline int putback_lru_pages(struct list_head *l) { return 0; } diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h index 071cf96cf01..6f65b2c8bb8 100644 --- a/include/linux/mlx4/cq.h +++ b/include/linux/mlx4/cq.h @@ -39,17 +39,18 @@ #include <linux/mlx4/doorbell.h> struct mlx4_cqe { - __be32 my_qpn; + __be32 vlan_my_qpn; __be32 immed_rss_invalid; __be32 g_mlpath_rqpn; - u8 sl; - u8 reserved1; + __be16 sl_vid; __be16 rlid; - __be32 ipoib_status; + __be16 status; + u8 ipv6_ext_mask; + u8 badfcs_enc; __be32 byte_cnt; __be16 wqe_index; __be16 checksum; - u8 reserved2[3]; + u8 reserved[3]; u8 owner_sr_opcode; }; @@ -64,6 +65,11 @@ struct mlx4_err_cqe { }; enum { + MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29, + MLX4_CQE_QPN_MASK = 0xffffff, +}; + +enum { MLX4_CQE_OWNER_MASK = 0x80, MLX4_CQE_IS_SEND_MASK = 0x40, MLX4_CQE_OPCODE_MASK = 0x1f @@ -86,13 +92,19 @@ enum { }; enum { - MLX4_CQE_IPOIB_STATUS_IPV4 = 1 << 22, - MLX4_CQE_IPOIB_STATUS_IPV4F = 1 << 23, - MLX4_CQE_IPOIB_STATUS_IPV6 = 1 << 24, - MLX4_CQE_IPOIB_STATUS_IPV4OPT = 1 << 25, - MLX4_CQE_IPOIB_STATUS_TCP = 1 << 26, - MLX4_CQE_IPOIB_STATUS_UDP = 1 << 27, - MLX4_CQE_IPOIB_STATUS_IPOK = 1 << 28, + MLX4_CQE_STATUS_IPV4 = 1 << 6, + MLX4_CQE_STATUS_IPV4F = 1 << 7, + MLX4_CQE_STATUS_IPV6 = 1 << 8, + MLX4_CQE_STATUS_IPV4OPT = 1 << 9, + MLX4_CQE_STATUS_TCP = 1 << 10, + MLX4_CQE_STATUS_UDP = 1 << 11, + MLX4_CQE_STATUS_IPOK = 1 << 12, +}; + +enum { + MLX4_CQE_LLC = 1, + MLX4_CQE_SNAP = 1 << 1, + MLX4_CQE_BAD_FCS = 1 << 4, }; static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 81b3dd5206e..655ea0d1ee1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -68,6 +68,14 @@ enum { MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21 }; +enum { + MLX4_BMME_FLAG_LOCAL_INV = 1 << 6, + MLX4_BMME_FLAG_REMOTE_INV = 1 << 7, + MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9, + MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10, + MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11, +}; + enum mlx4_event { MLX4_EVENT_TYPE_COMP = 0x00, MLX4_EVENT_TYPE_PATH_MIG = 0x01, @@ -184,6 +192,8 @@ struct mlx4_caps { u32 max_msg_sz; u32 page_size_cap; u32 flags; + u32 bmme_flags; + u32 reserved_lkey; u16 stat_rate_support; u8 port_width_cap[MLX4_MAX_PORTS + 1]; int max_gso_sz; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7f128b266fa..bf8f11982da 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -164,11 +164,13 @@ enum { MLX4_WQE_CTRL_SOLICITED = 1 << 1, MLX4_WQE_CTRL_IP_CSUM = 1 << 4, MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, + MLX4_WQE_CTRL_INS_VLAN = 1 << 6, }; struct mlx4_wqe_ctrl_seg { __be32 owner_opcode; - u8 reserved2[3]; + __be16 vlan_tag; + u8 ins_vlan; u8 fence_size; /* * High 24 bits are SRC remote buffer; low 8 bits are flags: @@ -219,7 +221,7 @@ struct mlx4_wqe_datagram_seg { __be32 reservd[2]; }; -struct mlx4_lso_seg { +struct mlx4_wqe_lso_seg { __be32 
mss_hdr_size; __be32 header[0]; }; @@ -233,6 +235,14 @@ struct mlx4_wqe_bind_seg { __be64 length; }; +enum { + MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX4_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX4_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + MLX4_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + struct mlx4_wqe_fmr_seg { __be32 flags; __be32 mem_key; @@ -255,11 +265,11 @@ struct mlx4_wqe_fmr_ext_seg { }; struct mlx4_wqe_local_inval_seg { - u8 flags; - u8 reserved1[3]; + __be32 flags; + u32 reserved1; __be32 mem_key; - u8 reserved2[3]; - u8 guest_id; + u32 reserved2[2]; + __be32 guest_id; __be64 pa; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 2128ef7780c..fa651609b65 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -41,6 +41,9 @@ extern unsigned long mmap_min_addr; #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +/* to align the pointer to the (next) page boundary */ +#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -100,6 +103,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ +#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */ @@ -166,12 +170,16 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - unsigned long (*nopfn)(struct vm_area_struct *area, - unsigned long address); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); + + /* called by access_process_vm when get_user_pages() fails, typically + * for use by special VMAs that can switch between memory and hardware + */ + int (*access)(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); #ifdef CONFIG_NUMA /* * set_policy() op must add a reference to any non-NULL @new mempolicy @@ -675,13 +683,6 @@ static inline int page_mapped(struct page *page) } /* - * Error return values for the *_nopfn functions - */ -#define NOPFN_SIGBUS ((unsigned long) -1) -#define NOPFN_OOM ((unsigned long) -2) -#define NOPFN_REFAULT ((unsigned long) -3) - -/* * Different kinds of faults, as returned by handle_mm_fault(). * Used to decide whether a process gets delivered SIGBUS or * just gets major/minor fault counters bumped up. 
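
The mm.h hunk above drops the old ->nopfn callback and adds an ->access() hook to struct vm_operations_struct, so access_process_vm() (ptrace, /proc/<pid>/mem) can still reach special VM_IO/VM_PFNMAP mappings that get_user_pages() refuses. Below is a minimal sketch of a driver wiring this up; the mydev_* names and the shadow buffer are invented for illustration, and a real MMIO mapping would more likely point .access at the generic_access_phys() helper this patch also declares in mm.h:

#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical driver-private state hung off vma->vm_private_data. */
struct mydev_mapping {
	void	*shadow_buf;	/* CPU-visible copy of the device window */
	size_t	size;
};

/*
 * ->access() is called by access_process_vm() when get_user_pages()
 * fails on this VMA; it returns the number of bytes transferred.
 */
static int mydev_vma_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	struct mydev_mapping *m = vma->vm_private_data;
	unsigned long offset = addr - vma->vm_start;

	if (offset >= m->size)
		return 0;
	if (len > m->size - offset)
		len = m->size - offset;

	if (write)
		memcpy(m->shadow_buf + offset, buf, len);
	else
		memcpy(buf, m->shadow_buf + offset, len);

	return len;
}

static struct vm_operations_struct mydev_vm_ops = {
	/* a real driver would also supply .fault for normal page faults */
	.access	= mydev_vma_access,
};
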
@@ -743,6 +744,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, + unsigned long size); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -772,14 +775,14 @@ struct mm_walk { int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk); -void free_pgd_range(struct mmu_gather **tlb, unsigned long addr, +void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); -void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma, - unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, + void *buf, int len, int write); static inline void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen) @@ -809,7 +812,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void * int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); -void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); @@ -833,6 +835,19 @@ extern int mprotect_fixup(struct vm_area_struct *vma, unsigned long end, unsigned long newflags); /* + * get_user_pages_fast provides equivalent functionality to get_user_pages, + * operating on current and current->mm (force=0 and doesn't return any vmas). + * + * get_user_pages_fast may take mmap_sem and page tables, so no assumptions + * can be made about locking. get_user_pages_fast is to be implemented in a + * way that is advantageous (vs get_user_pages()) when the user memory area is + * already faulted in and present in ptes. However if the pages have to be + * faulted in, it may turn out to be slightly slower). + */ +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); + +/* * A callback you can register to apply pressure to ageable caches. * * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. 
It should @@ -965,9 +980,8 @@ static inline void pgtable_page_dtor(struct page *page) NULL: pte_offset_kernel(pmd, address)) extern void free_area_init(unsigned long * zones_size); -extern void free_area_init_node(int nid, pg_data_t *pgdat, - unsigned long * zones_size, unsigned long zone_start_pfn, - unsigned long *zholes_size); +extern void free_area_init_node(int nid, unsigned long * zones_size, + unsigned long zone_start_pfn, unsigned long *zholes_size); #ifdef CONFIG_ARCH_POPULATES_NODE_MAP /* * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its @@ -1009,7 +1023,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern unsigned long find_max_pfn_with_active_regions(void); extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); @@ -1072,6 +1085,9 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); +extern int mm_take_all_locks(struct mm_struct *mm); +extern void mm_drop_all_locks(struct mm_struct *mm); + #ifdef CONFIG_PROC_FS /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ extern void added_exe_file_vma(struct mm_struct *mm); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 02a27ae7853..386edbe2cb4 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,6 +10,7 @@ #include <linux/rbtree.h> #include <linux/rwsem.h> #include <linux/completion.h> +#include <linux/cpumask.h> #include <asm/page.h> #include <asm/mmu.h> @@ -159,6 +160,17 @@ struct vm_area_struct { #endif }; +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + +struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + struct mm_struct { struct vm_area_struct * mmap; /* list of VMAs */ struct rb_root mm_rb; @@ -175,7 +187,6 @@ struct mm_struct { atomic_t mm_users; /* How many users with user space? 
*/ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ int map_count; /* number of VMAs */ - int core_waiters; struct rw_semaphore mmap_sem; spinlock_t page_table_lock; /* Protects page tables and some counters */ @@ -219,8 +230,7 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access the bits */ - /* coredumping support */ - struct completion *core_startup_done, core_done; + struct core_state *core_state; /* coredumping support */ /* aio bits */ rwlock_t ioctx_list_lock; /* aio lock */ @@ -244,6 +254,9 @@ struct mm_struct { struct file *exe_file; unsigned long num_exe_file_vmas; #endif +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; +#endif }; #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 0d508ac17d6..ee6e822d599 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -111,6 +111,8 @@ struct mmc_card { unsigned num_info; /* number of info strings */ const char **info; /* info strings */ struct sdio_func_tuple *tuples; /* unknown common tuples */ + + struct dentry *debugfs_root; }; #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 10a2080086c..9c288c90987 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -157,6 +157,8 @@ struct mmc_host { struct led_trigger *led; /* activity led */ #endif + struct dentry *debugfs_root; + unsigned long private[0] ____cacheline_aligned; }; diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h new file mode 100644 index 00000000000..b77486d152c --- /dev/null +++ b/include/linux/mmu_notifier.h @@ -0,0 +1,279 @@ +#ifndef _LINUX_MMU_NOTIFIER_H +#define _LINUX_MMU_NOTIFIER_H + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/mm_types.h> + +struct mmu_notifier; +struct mmu_notifier_ops; + +#ifdef CONFIG_MMU_NOTIFIER + +/* + * The mmu notifier_mm structure is allocated and installed in + * mm->mmu_notifier_mm inside the mm_take_all_locks() protected + * critical section and it's released only when mm_count reaches zero + * in mmdrop(). + */ +struct mmu_notifier_mm { + /* all mmu notifiers registerd in this mm are queued in this list */ + struct hlist_head list; + /* to serialize the list modifications and hlist_unhashed */ + spinlock_t lock; +}; + +struct mmu_notifier_ops { + /* + * Called either by mmu_notifier_unregister or when the mm is + * being destroyed by exit_mmap, always before all pages are + * freed. This can run concurrently with other mmu notifier + * methods (the ones invoked outside the mm context) and it + * should tear down all secondary mmu mappings and freeze the + * secondary mmu. If this method isn't implemented you've to + * be sure that nothing could possibly write to the pages + * through the secondary mmu by the time the last thread with + * tsk->mm == mm exits. 
+ * + * As side note: the pages freed after ->release returns could + * be immediately reallocated by the gart at an alias physical + * address with a different cache model, so if ->release isn't + * implemented because all _software_ driven memory accesses + * through the secondary mmu are terminated by the time the + * last thread of this mm quits, you've also to be sure that + * speculative _hardware_ operations can't allocate dirty + * cachelines in the cpu that could not be snooped and made + * coherent with the other read and write operations happening + * through the gart alias address, so leading to memory + * corruption. + */ + void (*release)(struct mmu_notifier *mn, + struct mm_struct *mm); + + /* + * clear_flush_young is called after the VM is + * test-and-clearing the young/accessed bitflag in the + * pte. This way the VM will provide proper aging to the + * accesses to the page through the secondary MMUs and not + * only to the ones through the Linux pte. + */ + int (*clear_flush_young)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * Before this is invoked any secondary MMU is still ok to + * read/write to the page previously pointed to by the Linux + * pte because the page hasn't been freed yet and it won't be + * freed until this returns. If required set_page_dirty has to + * be called internally to this method. + */ + void (*invalidate_page)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address); + + /* + * invalidate_range_start() and invalidate_range_end() must be + * paired and are called only when the mmap_sem and/or the + * locks protecting the reverse maps are held. The subsystem + * must guarantee that no additional references are taken to + * the pages in the range established between the call to + * invalidate_range_start() and the matching call to + * invalidate_range_end(). + * + * Invalidation of multiple concurrent ranges may be + * optionally permitted by the driver. Either way the + * establishment of sptes is forbidden in the range passed to + * invalidate_range_begin/end for the whole duration of the + * invalidate_range_begin/end critical section. + * + * invalidate_range_start() is called when all pages in the + * range are still mapped and have at least a refcount of one. + * + * invalidate_range_end() is called when all pages in the + * range have been unmapped and the pages have been freed by + * the VM. + * + * The VM will remove the page table entries and potentially + * the page between invalidate_range_start() and + * invalidate_range_end(). If the page must not be freed + * because of pending I/O or other circumstances then the + * invalidate_range_start() callback (or the initial mapping + * by the driver) must make sure that the refcount is kept + * elevated. + * + * If the driver increases the refcount when the pages are + * initially mapped into an address space then either + * invalidate_range_start() or invalidate_range_end() may + * decrease the refcount. If the refcount is decreased on + * invalidate_range_start() then the VM can free pages as page + * table entries are removed. If the refcount is only + * droppped on invalidate_range_end() then the driver itself + * will drop the last refcount but it must take care to flush + * any secondary tlb before doing the final free on the + * page. Pages will no longer be referenced by the linux + * address space but may still be referenced by sptes until + * the last refcount is dropped. 
+ */ + void (*invalidate_range_start)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); + void (*invalidate_range_end)(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end); +}; + +/* + * The notifier chains are protected by mmap_sem and/or the reverse map + * semaphores. Notifier chains are only changed when all reverse maps and + * the mmap_sem locks are taken. + * + * Therefore notifier chains can only be traversed when either + * + * 1. mmap_sem is held. + * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock). + * 3. No other concurrent thread can access the list (release) + */ +struct mmu_notifier { + struct hlist_node hlist; + const struct mmu_notifier_ops *ops; +}; + +static inline int mm_has_notifiers(struct mm_struct *mm) +{ + return unlikely(mm->mmu_notifier_mm); +} + +extern int mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern int __mmu_notifier_register(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void mmu_notifier_unregister(struct mmu_notifier *mn, + struct mm_struct *mm); +extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); +extern void __mmu_notifier_release(struct mm_struct *mm); +extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address); +extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end); + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_release(mm); +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + return __mmu_notifier_clear_flush_young(mm, address); + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_page(mm, address); +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_start(mm, start, end); +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range_end(mm, start, end); +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ + mm->mmu_notifier_mm = NULL; +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_mm_destroy(mm); +} + +/* + * These two macros will sometime replace ptep_clear_flush. + * ptep_clear_flush is impleemnted as macro itself, so this also is + * implemented as a macro until ptep_clear_flush will converted to an + * inline function, to diminish the risk of compilation failure. The + * invalidate_page method over time can be moved outside the PT lock + * and these two macros can be later removed. 
+ */ +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + pte_t __pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ + __pte; \ +}) + +#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +({ \ + int __young; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ + __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ + __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ + ___address); \ + __young; \ +}) + +#else /* CONFIG_MMU_NOTIFIER */ + +static inline void mmu_notifier_release(struct mm_struct *mm) +{ +} + +static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, + unsigned long address) +{ + return 0; +} + +static inline void mmu_notifier_invalidate_page(struct mm_struct *mm, + unsigned long address) +{ +} + +static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +static inline void mmu_notifier_mm_init(struct mm_struct *mm) +{ +} + +static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) +{ +} + +#define ptep_clear_flush_young_notify ptep_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush + +#endif /* CONFIG_MMU_NOTIFIER */ + +#endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/include/linux/module.h b/include/linux/module.h index fce15ebd0e1..68e09557c95 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -23,7 +23,7 @@ /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) -/* v850 toolchain uses a `_' prefix for all user symbols */ +/* some toolchains uses a `_' prefix for all user symbols */ #ifndef MODULE_SYMBOL_PREFIX #define MODULE_SYMBOL_PREFIX "" #endif diff --git a/include/linux/mount.h b/include/linux/mount.h index 4374d1adeb4..30a1d63b6fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -47,7 +47,7 @@ struct vfsmount { struct list_head mnt_child; /* and going through their mnt_child */ int mnt_flags; /* 4 bytes hole on 64bits arches */ - char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ + const char *mnt_devname; /* Name of device e.g. 
/dev/dsk/hda1 */ struct list_head mnt_list; struct list_head mnt_expire; /* link in fs-specific expiry list */ struct list_head mnt_share; /* circular list of shared mounts */ @@ -105,7 +105,8 @@ extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, struct nameidata; -extern int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd, +struct path; +extern int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags, struct list_head *fslist); extern void mark_mounts_for_expiry(struct list_head *mounts); diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index 81cd36b735b..ba63858056c 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -2,11 +2,11 @@ #define _LINUX_MSDOS_FS_H #include <linux/magic.h> +#include <asm/byteorder.h> /* * The MS-DOS filesystem constants/structures */ -#include <asm/byteorder.h> #define SECTOR_SIZE 512 /* sector size (bytes) */ #define SECTOR_BITS 9 /* log2(SECTOR_SIZE) */ @@ -89,24 +89,22 @@ #define IS_FSINFO(x) (le32_to_cpu((x)->signature1) == FAT_FSINFO_SIG1 \ && le32_to_cpu((x)->signature2) == FAT_FSINFO_SIG2) +struct __fat_dirent { + long d_ino; + __kernel_off_t d_off; + unsigned short d_reclen; + char d_name[256]; /* We must not include limits.h! */ +}; + /* * ioctl commands */ -#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2]) -#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2]) +#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct __fat_dirent[2]) +#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct __fat_dirent[2]) /* <linux/videotext.h> has used 0x72 ('r') in collision, so skip a few */ #define FAT_IOCTL_GET_ATTRIBUTES _IOR('r', 0x10, __u32) #define FAT_IOCTL_SET_ATTRIBUTES _IOW('r', 0x11, __u32) -/* - * vfat shortname flags - */ -#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ -#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ -#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ -#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ -#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ - struct fat_boot_sector { __u8 ignored[3]; /* Boot strap short or near jump */ __u8 system_id[8]; /* Name - can be used to special case @@ -168,14 +166,6 @@ struct msdos_dir_slot { __u8 name11_12[4]; /* last 2 characters in name */ }; -struct fat_slot_info { - loff_t i_pos; /* on-disk position of directory entry */ - loff_t slot_off; /* offset for slot or de start */ - int nr_slots; /* number of slots + 1(de) in filename */ - struct msdos_dir_entry *de; - struct buffer_head *bh; -}; - #ifdef __KERNEL__ #include <linux/buffer_head.h> @@ -184,6 +174,15 @@ struct fat_slot_info { #include <linux/fs.h> #include <linux/mutex.h> +/* + * vfat shortname flags + */ +#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ +#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ +#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ +#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ +#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ + struct fat_mount_options { uid_t fs_uid; gid_t fs_gid; @@ -202,10 +201,10 @@ struct fat_mount_options { utf8:1, /* Use of UTF-8 character set (Default) */ unicode_xlate:1, /* create escape sequences for unhandled Unicode */ numtail:1, /* Does first alias have a numeric '~1' type tail? 
*/ - atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */ flush:1, /* write things quickly */ nocase:1, /* Does this need case conversion? 0=need case conversion*/ - usefree:1; /* Use free_clusters for FAT32 */ + usefree:1, /* Use free_clusters for FAT32 */ + tz_utc:1; /* Filesystem timestamps are in UTC */ }; #define FAT_HASH_BITS 8 @@ -267,6 +266,14 @@ struct msdos_inode_info { struct inode vfs_inode; }; +struct fat_slot_info { + loff_t i_pos; /* on-disk position of directory entry */ + loff_t slot_off; /* offset for slot or de start */ + int nr_slots; /* number of slots + 1(de) in filename */ + struct msdos_dir_entry *de; + struct buffer_head *bh; +}; + static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) { return sb->s_fs_info; @@ -428,8 +435,9 @@ extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, extern void fat_fs_panic(struct super_block *s, const char *fmt, ...); extern void fat_clusters_flush(struct super_block *sb); extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); -extern int date_dos2unix(unsigned short time, unsigned short date); -extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date); +extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc); +extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date, + int tz_utc); extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); int fat_cache_init(void); diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 9a6e2f953cb..310e6160641 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h @@ -1,6 +1,4 @@ /* - * $Id: blktrans.h,v 1.6 2005/11/07 11:14:54 gleixner Exp $ - * * (C) 2003 David Woodhouse <dwmw2@infradead.org> * * Interface to Linux block layer for MTD 'translation layers'. 
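
The msdos_fs.h changes above make the VFAT readdir ioctls usable from userspace without depending on libc's struct dirent: they now take the new, self-contained struct __fat_dirent. A rough userspace sketch of issuing VFAT_IOCTL_READDIR_BOTH on an open FAT directory follows; one call is assumed to fill both name forms (long name and 8.3 alias) of a single entry, as with the old interface, and error/end-of-directory handling is only illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msdos_fs.h>

int main(int argc, char **argv)
{
	struct __fat_dirent entries[2];	/* two name forms of one entry */
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	if (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, entries) >= 0)
		printf("%s / %s\n", entries[0].d_name, entries[1].d_name);

	close(fd);
	return 0;
}
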
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index b0ddf4b2586..d6fb115f5a0 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -1,7 +1,6 @@ /* Common Flash Interface structures * See http://support.intel.com/design/flash/technote/index.htm - * $Id: cfi.h,v 1.57 2005/11/15 23:28:17 tpoynor Exp $ */ #ifndef __MTD_CFI_H__ diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h index 25724f7d386..d802f7736be 100644 --- a/include/linux/mtd/cfi_endian.h +++ b/include/linux/mtd/cfi_endian.h @@ -1,8 +1,3 @@ -/* - * $Id: cfi_endian.h,v 1.11 2002/01/30 23:20:48 awozniak Exp $ - * - */ - #include <asm/byteorder.h> #ifndef CONFIG_MTD_CFI_ADV_OPTIONS diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index ed8dc675521..c02f3d264ec 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h @@ -4,8 +4,6 @@ * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> * * This code is GPL - * - * $Id: concat.h,v 1.1 2002/03/08 16:34:36 rkaiser Exp $ */ #ifndef MTD_CONCAT_H diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h index 9addd073bf1..0a6d516ab71 100644 --- a/include/linux/mtd/doc2000.h +++ b/include/linux/mtd/doc2000.h @@ -6,8 +6,6 @@ * Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com> * Copyright (C) 2002-2003 SnapGear Inc * - * $Id: doc2000.h,v 1.25 2005/11/07 11:14:54 gleixner Exp $ - * * Released under GPL */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index 39e7d2a1be9..08dd131301c 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -5,9 +5,6 @@ * Contains information about the location and state of a given flash device * * (C) 2000 Red Hat. GPLd. - * - * $Id: flashchip.h,v 1.18 2005/11/07 11:14:54 gleixner Exp $ - * */ #ifndef __MTD_FLASHCHIP_H__ diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h index d9960911330..0be442f881d 100644 --- a/include/linux/mtd/ftl.h +++ b/include/linux/mtd/ftl.h @@ -1,6 +1,4 @@ /* - * $Id: ftl.h,v 1.7 2005/11/07 11:14:54 gleixner Exp $ - * * Derived from (and probably identical to): * ftl.h 1.7 1999/10/25 20:23:17 * diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h index 256e7342ed1..df362ddf294 100644 --- a/include/linux/mtd/gen_probe.h +++ b/include/linux/mtd/gen_probe.h @@ -1,7 +1,6 @@ /* * (C) 2001, 2001 Red Hat, Inc. 
* GPL'd - * $Id: gen_probe.h,v 1.4 2005/11/07 11:14:54 gleixner Exp $ */ #ifndef __LINUX_MTD_GEN_PROBE_H__ diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h index 85fd041d44a..64ee53ce95a 100644 --- a/include/linux/mtd/inftl.h +++ b/include/linux/mtd/inftl.h @@ -2,8 +2,6 @@ * inftl.h -- defines to support the Inverse NAND Flash Translation Layer * * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) - * - * $Id: inftl.h,v 1.7 2005/06/13 13:08:45 sean Exp $ */ #ifndef __MTD_INFTL_H__ @@ -52,8 +50,6 @@ struct INFTLrecord { int INFTL_mount(struct INFTLrecord *s); int INFTL_formatblock(struct INFTLrecord *s, int block); -extern char inftlmountrev[]; - void INFTL_dumptables(struct INFTLrecord *s); void INFTL_dumpVUchains(struct INFTLrecord *s); diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 9c1d95491f8..aa30244492c 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -1,6 +1,5 @@ /* Overhauled routines for dealing with different mmap regions of flash */ -/* $Id: map.h,v 1.54 2005/11/07 11:14:54 gleixner Exp $ */ #ifndef __LINUX_MTD_MAP_H__ #define __LINUX_MTD_MAP_H__ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 8b5d49133ec..92263654855 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -1,6 +1,4 @@ /* - * $Id: mtd.h,v 1.61 2005/11/07 11:14:54 gleixner Exp $ - * * Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al. * * Released under GPL @@ -274,7 +272,11 @@ static inline void mtd_erase_callback(struct erase_info *instr) printk(KERN_INFO args); \ } while(0) #else /* CONFIG_MTD_DEBUG */ -#define DEBUG(n, args...) do { } while(0) +#define DEBUG(n, args...) \ + do { \ + if (0) \ + printk(KERN_INFO args); \ + } while(0) #endif /* CONFIG_MTD_DEBUG */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 53ea3dc8b0e..81774e5facf 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -5,8 +5,6 @@ * Steven J. Hill <sjhill@realitydiluted.com> * Thomas Gleixner <tglx@linutronix.de> * - * $Id: nand.h,v 1.74 2005/09/15 13:58:50 vwool Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -179,6 +177,9 @@ typedef enum { #define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) #define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) +/* Large page NAND with SOFT_ECC should support subpage reads */ +#define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \ + && (chip->page_shift > 9)) /* Mask to zero out the chip options, which come from the id table */ #define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) @@ -276,6 +277,10 @@ struct nand_ecc_ctrl { int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf); + int (*read_subpage)(struct mtd_info *mtd, + struct nand_chip *chip, + uint32_t offs, uint32_t len, + uint8_t *buf); void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf); diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 12c5bc342ea..090da505425 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -3,8 +3,6 @@ * * Copyright (C) 2000 Steven J. 
Hill (sjhill@realitydiluted.com) * - * $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h index 001eec50cac..dcaf611ed74 100644 --- a/include/linux/mtd/nftl.h +++ b/include/linux/mtd/nftl.h @@ -1,6 +1,4 @@ /* - * $Id: nftl.h,v 1.16 2004/06/30 14:49:00 dbrown Exp $ - * * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 7c37d7e55ab..5014f7a9f5d 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -4,8 +4,6 @@ * (C) 2000 Nicolas Pitre <nico@cam.org> * * This code is GPL - * - * $Id: partitions.h,v 1.17 2005/11/07 11:14:55 gleixner Exp $ */ #ifndef MTD_PARTITIONS_H diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index 0dc07d5f335..c8e63a5ee72 100644 --- a/include/linux/mtd/physmap.h +++ b/include/linux/mtd/physmap.h @@ -2,8 +2,6 @@ * For boards with physically mapped flash and using * drivers/mtd/maps/physmap.c mapping driver. * - * $Id: physmap.h,v 1.4 2005/11/07 11:14:55 gleixner Exp $ - * * Copyright (C) 2003 MontaVista Software Inc. * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net * diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h index 0e37ad07bce..e07890aff1c 100644 --- a/include/linux/mtd/plat-ram.h +++ b/include/linux/mtd/plat-ram.h @@ -6,8 +6,6 @@ * * Generic platform device based RAM map * - * $Id: plat-ram.h,v 1.2 2005/01/24 00:37:40 bjd Exp $ - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h index 5cc070c24d8..27ad40aed19 100644 --- a/include/linux/mtd/pmc551.h +++ b/include/linux/mtd/pmc551.h @@ -1,6 +1,4 @@ /* - * $Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $ - * * PMC551 PCI Mezzanine Ram Device * * Author: @@ -17,7 +15,7 @@ #include <linux/mtd/mtd.h> -#define PMC551_VERSION "$Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $\n"\ +#define PMC551_VERSION \ "Ramix PMC551 PCI Mezzanine Ram Driver. 
(C) 1999,2000 Nortel Networks.\n" /* diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index f71201d0f3e..6316fafe5c2 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -45,13 +45,13 @@ enum { * @size: how many physical eraseblocks are reserved for this volume * @used_bytes: how many bytes of data this volume contains * @used_ebs: how many physical eraseblocks of this volume actually contain any - * data + * data * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) * @corrupted: non-zero if the volume is corrupted (static volumes only) * @upd_marker: non-zero if the volume has update marker set * @alignment: volume alignment * @usable_leb_size: how many bytes are available in logical eraseblocks of - * this volume + * this volume * @name_len: volume name length * @name: volume name * @cdev: UBI volume character device major and minor numbers @@ -152,6 +152,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); +int ubi_sync(int ubi_num); /* * This function is the same as the 'ubi_leb_read()' function, but it does not diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h index e9d40bdde48..36efcba15ec 100644 --- a/include/linux/mtd/xip.h +++ b/include/linux/mtd/xip.h @@ -11,8 +11,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * $Id: xip.h,v 1.5 2005/11/07 11:14:55 gleixner Exp $ */ #ifndef __LINUX_MTD_XIP_H__ diff --git a/include/linux/namei.h b/include/linux/namei.h index 24d88e98a62..68f8c3203c8 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -47,27 +47,24 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_DIRECTORY 2 #define LOOKUP_CONTINUE 4 #define LOOKUP_PARENT 16 -#define LOOKUP_NOALT 32 #define LOOKUP_REVAL 64 /* * Intent data */ #define LOOKUP_OPEN (0x0100) #define LOOKUP_CREATE (0x0200) -#define LOOKUP_ACCESS (0x0400) -#define LOOKUP_CHDIR (0x0800) - -extern int __user_walk(const char __user *, unsigned, struct nameidata *); -extern int __user_walk_fd(int dfd, const char __user *, unsigned, struct nameidata *); -#define user_path_walk(name,nd) \ - __user_walk_fd(AT_FDCWD, name, LOOKUP_FOLLOW, nd) -#define user_path_walk_link(name,nd) \ - __user_walk_fd(AT_FDCWD, name, 0, nd) + +extern int user_path_at(int, const char __user *, unsigned, struct path *); + +#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path) +#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path) +#define user_path_dir(name, path) \ + user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path) + extern int path_lookup(const char *, unsigned, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); -extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags); extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); diff --git a/include/linux/net.h b/include/linux/net.h index 150a48c68d5..4a9a30f2d68 100644 
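Illustrative sketch, not part of this patch: the namei.h hunk above drops the __user_walk()/user_path_walk() helpers in favour of user_path_at() and the user_path()/user_lpath()/user_path_dir() wrappers, which hand back a struct path rather than a full nameidata. A converted caller would look roughly as follows; do_example_getattr() is a hypothetical function used only for illustration.

#include <linux/namei.h>
#include <linux/path.h>
#include <linux/fs.h>
#include <linux/stat.h>

static int do_example_getattr(const char __user *filename, struct kstat *stat)
{
	struct path path;
	int error;

	/* formerly: user_path_walk(filename, &nd); ... path_put(&nd.path); */
	error = user_path(filename, &path);	/* AT_FDCWD + LOOKUP_FOLLOW */
	if (error)
		return error;

	error = vfs_getattr(path.mnt, path.dentry, stat);
	path_put(&path);
	return error;
}
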
--- a/include/linux/net.h +++ b/include/linux/net.h @@ -20,6 +20,7 @@ #include <linux/wait.h> #include <linux/socket.h> +#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <asm/socket.h> struct poll_table_struct; @@ -46,6 +47,7 @@ struct net; #define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ #define SYS_SENDMSG 16 /* sys_sendmsg(2) */ #define SYS_RECVMSG 17 /* sys_recvmsg(2) */ +#define SYS_PACCEPT 18 /* sys_paccept(2) */ typedef enum { SS_FREE = 0, /* not allocated */ @@ -94,6 +96,15 @@ enum sock_type { }; #define SOCK_MAX (SOCK_PACKET + 1) +/* Mask which covers at least up to SOCK_MASK-1. The + * remaining bits are used as flags. */ +#define SOCK_TYPE_MASK 0xf + +/* Flags for socket, socketpair, paccept */ +#define SOCK_CLOEXEC O_CLOEXEC +#ifndef SOCK_NONBLOCK +#define SOCK_NONBLOCK O_NONBLOCK +#endif #endif /* ARCH_HAS_SOCKET_TYPES */ @@ -208,10 +219,12 @@ extern int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); extern int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); -extern int sock_map_fd(struct socket *sock); +extern int sock_map_fd(struct socket *sock, int flags); extern struct socket *sockfd_lookup(int fd, int *err); #define sockfd_put(sock) fput(sock->file) extern int net_ratelimit(void); +extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); #define net_random() random32() #define net_srandom(seed) srandom32((__force u32)seed) @@ -338,8 +351,7 @@ static const struct proto_ops name##_ops = { \ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> -extern int net_msg_cost; -extern int net_msg_burst; +extern struct ratelimit_state net_ratelimit_state; #endif #endif /* __KERNEL__ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b4d056ceab9..488c56e649b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -61,9 +61,7 @@ struct wireless_dev; #define NET_XMIT_DROP 1 /* skb dropped */ #define NET_XMIT_CN 2 /* congestion notification */ #define NET_XMIT_POLICED 3 /* skb is shot by police */ -#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; - (TC use only - dev_queue_xmit - returns this as NET_XMIT_SUCCESS) */ +#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */ /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ @@ -440,6 +438,7 @@ static inline void napi_synchronize(const struct napi_struct *n) enum netdev_queue_state_t { __QUEUE_STATE_XOFF, + __QUEUE_STATE_FROZEN, }; struct netdev_queue { @@ -636,7 +635,7 @@ struct net_device unsigned int real_num_tx_queues; unsigned long tx_queue_len; /* Max frames per queue allowed */ - + spinlock_t tx_global_lock; /* * One part is mostly used on xmit path (device) */ @@ -1099,6 +1098,11 @@ static inline int netif_queue_stopped(const struct net_device *dev) return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); } +static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue) +{ + return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state); +} + /** * netif_running - test if up * @dev: network device @@ -1475,6 +1479,26 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) txq->xmit_lock_owner = smp_processor_id(); } +static inline int __netif_tx_trylock(struct netdev_queue *txq) +{ + int ok = spin_trylock(&txq->_xmit_lock); + if (likely(ok)) + txq->xmit_lock_owner = smp_processor_id(); + return ok; +} + +static inline void __netif_tx_unlock(struct netdev_queue *txq) +{ + 
txq->xmit_lock_owner = -1; + spin_unlock(&txq->_xmit_lock); +} + +static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) +{ + txq->xmit_lock_owner = -1; + spin_unlock_bh(&txq->_xmit_lock); +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1484,12 +1508,23 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) */ static inline void netif_tx_lock(struct net_device *dev) { - int cpu = smp_processor_id(); unsigned int i; + int cpu; + spin_lock(&dev->tx_global_lock); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + /* We are the only thread of execution doing a + * freeze, but we have to grab the _xmit_lock in + * order to synchronize with threads which are in + * the ->hard_start_xmit() handler and already + * checked the frozen bit. + */ __netif_tx_lock(txq, cpu); + set_bit(__QUEUE_STATE_FROZEN, &txq->state); + __netif_tx_unlock(txq); } } @@ -1499,40 +1534,22 @@ static inline void netif_tx_lock_bh(struct net_device *dev) netif_tx_lock(dev); } -static inline int __netif_tx_trylock(struct netdev_queue *txq) -{ - int ok = spin_trylock(&txq->_xmit_lock); - if (likely(ok)) - txq->xmit_lock_owner = smp_processor_id(); - return ok; -} - -static inline int netif_tx_trylock(struct net_device *dev) -{ - return __netif_tx_trylock(netdev_get_tx_queue(dev, 0)); -} - -static inline void __netif_tx_unlock(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock(&txq->_xmit_lock); -} - -static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) -{ - txq->xmit_lock_owner = -1; - spin_unlock_bh(&txq->_xmit_lock); -} - static inline void netif_tx_unlock(struct net_device *dev) { unsigned int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - __netif_tx_unlock(txq); - } + /* No need to grab the _xmit_lock here. If the + * queue is not stopped for another reason, we + * force a schedule. 
+ */ + clear_bit(__QUEUE_STATE_FROZEN, &txq->state); + if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) + __netif_schedule(txq->qdisc); + } + spin_unlock(&dev->tx_global_lock); } static inline void netif_tx_unlock_bh(struct net_device *dev) @@ -1556,13 +1573,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) static inline void netif_tx_disable(struct net_device *dev) { unsigned int i; + int cpu; - netif_tx_lock_bh(dev); + local_bh_disable(); + cpu = smp_processor_id(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); + + __netif_tx_lock(txq, cpu); netif_tx_stop_queue(txq); + __netif_tx_unlock(txq); } - netif_tx_unlock_bh(dev); + local_bh_enable(); } static inline void netif_addr_lock(struct net_device *dev) diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index 22ce29995f1..a049df4f223 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -30,6 +30,9 @@ enum tcp_conntrack { /* Be liberal in window checking */ #define IP_CT_TCP_FLAG_BE_LIBERAL 0x08 +/* Has unacknowledged data */ +#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 + struct nf_ct_tcp_flags { u_int8_t flags; u_int8_t mask; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 29d26191873..78a5922a2f1 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -42,7 +42,6 @@ #include <linux/in.h> #include <linux/kref.h> #include <linux/mm.h> -#include <linux/namei.h> #include <linux/pagemap.h> #include <linux/rbtree.h> #include <linux/rwsem.h> @@ -332,7 +331,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); -extern int nfs_permission(struct inode *, int, struct nameidata *); +extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); extern int nfs_attribute_timeout(struct inode *inode); diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index a2861d95ecc..108f47e5fd9 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -12,7 +12,6 @@ #include <linux/types.h> #include <linux/unistd.h> -#include <linux/dirent.h> #include <linux/fs.h> #include <linux/posix_acl.h> #include <linux/mount.h> diff --git a/include/linux/notifier.h b/include/linux/notifier.h index bd3d72ddf33..da2698b0fdd 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -214,6 +214,8 @@ static inline int notifier_to_errno(int ret) #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, * not handling interrupts, soon dead */ +#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug + * lock is dropped */ /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend * operation in progress diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 0e66b57631f..c8a768e5964 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -82,9 +82,12 @@ static inline void get_nsproxy(struct nsproxy *ns) } #ifdef CONFIG_CGROUP_NS -int ns_cgroup_clone(struct task_struct *tsk); +int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid); #else -static inline int 
ns_cgroup_clone(struct task_struct *tsk) { return 0; } +static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid) +{ + return 0; +} #endif #endif diff --git a/include/linux/of.h b/include/linux/of.h index 59a61bdc98b..79886ade070 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); +extern int of_modalias_node(struct device_node *node, char *modalias, int len); #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_spi.h b/include/linux/of_spi.h new file mode 100644 index 00000000000..5f71ee8c086 --- /dev/null +++ b/include/linux/of_spi.h @@ -0,0 +1,18 @@ +/* + * OpenFirmware SPI support routines + * Copyright (C) 2008 Secret Lab Technologies Ltd. + * + * Support routines for deriving SPI device attachments from the device + * tree. + */ + +#ifndef __LINUX_OF_SPI_H +#define __LINUX_OF_SPI_H + +#include <linux/of.h> +#include <linux/spi/spi.h> + +extern void of_register_spi_devices(struct spi_master *master, + struct device_node *np); + +#endif /* __LINUX_OF_SPI */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 0d2a4e7012a..c74d3e87531 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -96,7 +96,22 @@ enum pageflags { #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR PG_uncached, /* Page has been mapped as uncached */ #endif - __NR_PAGEFLAGS + __NR_PAGEFLAGS, + + /* Filesystems */ + PG_checked = PG_owner_priv_1, + + /* XEN */ + PG_pinned = PG_owner_priv_1, + PG_savepinned = PG_dirty, + + /* SLOB */ + PG_slob_page = PG_active, + PG_slob_free = PG_private, + + /* SLUB */ + PG_slub_frozen = PG_active, + PG_slub_debug = PG_error, }; #ifndef __GENERATING_BOUNDS_H @@ -148,20 +163,26 @@ static inline int Page##uname(struct page *page) \ struct page; /* forward declaration */ -PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) __PAGEFLAG(Slab, slab) -PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ -PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ -PAGEFLAG(SavePinned, dirty); /* Xen */ +PAGEFLAG(Checked, checked) /* Used by some filesystems */ +PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ +PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) +__PAGEFLAG(SlobPage, slob_page) +__PAGEFLAG(SlobFree, slob_free) + +__PAGEFLAG(SlubFrozen, slub_frozen) +__PAGEFLAG(SlubDebug, slub_debug) + /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. 
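Illustrative sketch, not part of this patch: the enum aliases added above (PG_checked = PG_owner_priv_1, PG_slub_debug = PG_error, and so on) are what the PAGEFLAG()/__PAGEFLAG() macros now consume, so the policy-named accessors keep their old behaviour while the bit sharing is spelled out in one place. For example, PAGEFLAG(Checked, checked) expands to roughly the following (hand-sketched; the __PAGEFLAG variants emit the non-atomic __SetPage*/__ClearPage* forms instead, and struct page plus <linux/bitops.h> are assumed in scope as they are in page-flags.h):

static inline int PageChecked(struct page *page)
{
	return test_bit(PG_checked, &page->flags);
}

static inline void SetPageChecked(struct page *page)
{
	set_bit(PG_checked, &page->flags);
}

static inline void ClearPageChecked(struct page *page)
{
	clear_bit(PG_checked, &page->flags);
}
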
@@ -218,9 +239,6 @@ static inline void __SetPageUptodate(struct page *page) { smp_wmb(); __set_bit(PG_uptodate, &(page)->flags); -#ifdef CONFIG_S390 - page_clear_dirty(page); -#endif } static inline void SetPageUptodate(struct page *page) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d2fca802f80..5da31c12101 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -12,6 +12,7 @@ #include <asm/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> +#include <linux/hardirq.h> /* for in_interrupt() */ /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -19,10 +20,11 @@ */ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ +#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ static inline void mapping_set_error(struct address_space *mapping, int error) { - if (error) { + if (unlikely(error)) { if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else @@ -62,6 +64,121 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _count. + * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _count (eg. reclaim) has the + * following (with tree_lock held for write): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. + * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. 
+ */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). + */ + return 0; + } +#endif + VM_BUG_ON(PageTail(page)); + + return 1; +} + +/* + * Same as above, but add instead of inc (could just be merged) + */ +static inline int page_cache_add_speculative(struct page *page, int count) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + VM_BUG_ON(page_count(page) == 0); + atomic_add(count, &page->_count); + +#else + if (unlikely(!atomic_add_unless(&page->_count, count, 0))) + return 0; +#endif + VM_BUG_ON(PageCompound(page) && page != compound_head(page)); + + return 1; +} + +static inline int page_freeze_refs(struct page *page, int count) +{ + return likely(atomic_cmpxchg(&page->_count, count, 0) == count); +} + +static inline void page_unfreeze_refs(struct page *page, int count) +{ + VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -133,13 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -extern void remove_from_page_cache(struct page *page); -extern void __remove_from_page_cache(struct page *page); - /* * Return byte-offset into filesystem object for page. */ @@ -161,13 +271,28 @@ extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); +static inline void set_page_locked(struct page *page) +{ + set_bit(PG_locked, &page->flags); +} + +static inline void clear_page_locked(struct page *page) +{ + clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return !test_and_set_bit(PG_locked, &page->flags); +} + /* * lock_page may only be called if we have the page's inode pinned. 
*/ static inline void lock_page(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page(page); } @@ -179,7 +304,7 @@ static inline void lock_page(struct page *page) static inline int lock_page_killable(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) return __lock_page_killable(page); return 0; } @@ -191,7 +316,7 @@ static inline int lock_page_killable(struct page *page) static inline void lock_page_nosync(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page_nosync(page); } @@ -276,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return ret; } +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void remove_from_page_cache(struct page *page); +extern void __remove_from_page_cache(struct page *page); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run set_page_locked() against it. + */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + clear_page_locked(page); + return error; +} + #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/parport.h b/include/linux/parport.h index dcb9e01a69c..6a0d7cdb577 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -560,5 +560,8 @@ extern int parport_device_proc_unregister(struct pardevice *device); #endif /* !CONFIG_PARPORT_NOT_PC */ +extern unsigned long parport_default_timeslice; +extern int parport_default_spintime; + #endif /* __KERNEL__ */ #endif /* _PARPORT_H_ */ diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index a1a1e618e99..91ba0b338b4 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -27,6 +27,7 @@ extern void pcie_aspm_init_link_state(struct pci_dev *pdev); extern void pcie_aspm_exit_link_state(struct pci_dev *pdev); extern void pcie_aspm_pm_state_change(struct pci_dev *pdev); extern void pci_disable_link_state(struct pci_dev *pdev, int state); +extern void pcie_no_aspm(void); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { @@ -40,6 +41,10 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } + +static inline void pcie_no_aspm(void) +{ +} #endif #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ diff --git a/include/linux/pci.h b/include/linux/pci.h index a6a088e1a80..c0e14008a3c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -124,6 +124,8 @@ enum pci_dev_flags { * generation too. 
*/ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) 1, + /* Device configuration is irrevocably lost if disabled into D3 */ + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, }; typedef unsigned short __bitwise pci_bus_flags_t; @@ -638,7 +640,10 @@ int pci_save_state(struct pci_dev *dev); int pci_restore_state(struct pci_dev *dev); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); +void pci_pme_active(struct pci_dev *dev, bool enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); +pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); int pci_back_from_sleep(struct pci_dev *dev); @@ -676,10 +681,12 @@ void pci_enable_bridges(struct pci_bus *bus); /* Proper probing supporting hot-pluggable devices */ int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -static inline int __must_check pci_register_driver(struct pci_driver *driver) -{ - return __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME); -} + +/* + * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded + */ +#define pci_register_driver(driver) \ + __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) void pci_unregister_driver(struct pci_driver *dev); void pci_remove_behind_bridge(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d8507eb394c..9ec2bcce8e8 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -748,6 +748,7 @@ #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_TSB43AB22 0x8023 #define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 #define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033 #define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034 @@ -1832,7 +1833,13 @@ #define PCI_DEVICE_ID_MOXA_C320 0x3200 #define PCI_VENDOR_ID_CCD 0x1397 +#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4 +#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234 +#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8 #define PCI_DEVICE_ID_CCD_2BD0 0x2bd0 +#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1 +#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136 +#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137 #define PCI_DEVICE_ID_CCD_B000 0xb000 #define PCI_DEVICE_ID_CCD_B006 0xb006 #define PCI_DEVICE_ID_CCD_B007 0xb007 @@ -1842,8 +1849,32 @@ #define PCI_DEVICE_ID_CCD_B00B 0xb00b #define PCI_DEVICE_ID_CCD_B00C 0xb00c #define PCI_DEVICE_ID_CCD_B100 0xb100 +#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520 +#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522 +#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523 +#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 +#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 +#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 +#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 +#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 +#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564 +#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565 +#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566 +#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567 +#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568 +#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569 +#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A +#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B +#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620 +#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622 #define PCI_DEVICE_ID_CCD_B700 0xb700 #define PCI_DEVICE_ID_CCD_B701 0xb701 +#define 
PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523 +#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884 +#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888 +#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998 #define PCI_VENDOR_ID_EXAR 0x13a8 #define PCI_DEVICE_ID_EXAR_XR17C152 0x0152 @@ -2146,8 +2177,6 @@ #define PCI_DEVICE_ID_HERC_WIN 0x5732 #define PCI_DEVICE_ID_HERC_UNI 0x5832 -#define PCI_VENDOR_ID_RDC 0x17f3 - #define PCI_VENDOR_ID_SITECOM 0x182d #define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069 @@ -2371,6 +2400,14 @@ #define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 #define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a +#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b +#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c +#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432 +#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 @@ -2392,6 +2429,9 @@ #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f +#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 +#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 +#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6 #define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036 @@ -2512,6 +2552,9 @@ #define PCI_VENDOR_ID_3COM_2 0xa727 +#define PCI_VENDOR_ID_DIGIUM 0xd161 +#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 + #define PCI_SUBVENDOR_ID_EXSYS 0xd84d #define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 #define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 19958b92990..450684f7eaa 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -374,6 +374,7 @@ #define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */ #define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */ #define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */ +#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ #define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ #define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ #define PCI_EXP_DEVCTL 8 /* Device Control */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4cdd393e71e..fac3337547e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -74,11 +74,6 @@ struct percpu_data { (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu); -extern void percpu_depopulate(void *__pdata, int cpu); -extern int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, - cpumask_t *mask); -extern void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask); extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); extern void percpu_free(void *__pdata); @@ -86,26 +81,6 @@ extern void percpu_free(void *__pdata); #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) -static inline void percpu_depopulate(void *__pdata, int cpu) -{ -} - -static inline void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask) -{ -} - -static inline void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, - int cpu) -{ - return percpu_ptr(__pdata, cpu); -} - -static inline int __percpu_populate_mask(void *__pdata, size_t 
size, gfp_t gfp, - cpumask_t *mask) -{ - return 0; -} - static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) { return kzalloc(size, gfp); @@ -118,10 +93,6 @@ static inline void percpu_free(void *__pdata) #endif /* CONFIG_SMP */ -#define percpu_populate_mask(__pdata, size, gfp, mask) \ - __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) -#define percpu_depopulate_mask(__pdata, mask) \ - __percpu_depopulate_mask((__pdata), &(mask)) #define percpu_alloc_mask(size, gfp, mask) \ __percpu_alloc_mask((size), (gfp), &(mask)) diff --git a/include/linux/pid.h b/include/linux/pid.h index c21c7e8124a..22921ac4cfd 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -48,7 +48,7 @@ enum pid_type */ struct upid { - /* Try to keep pid_chain in the same cacheline as nr for find_pid */ + /* Try to keep pid_chain in the same cacheline as nr for find_vpid */ int nr; struct pid_namespace *ns; struct hlist_node pid_chain; @@ -57,10 +57,10 @@ struct upid { struct pid { atomic_t count; + unsigned int level; /* lists of tasks that use this pid */ struct hlist_head tasks[PIDTYPE_MAX]; struct rcu_head rcu; - unsigned int level; struct upid numbers[1]; }; @@ -105,14 +105,12 @@ extern struct pid_namespace init_pid_ns; * or rcu_read_lock() held. * * find_pid_ns() finds the pid in the namespace specified - * find_pid() find the pid by its global id, i.e. in the init namespace * find_vpid() finr the pid by its virtual id, i.e. in the current namespace * - * see also find_task_by_pid() set in include/linux/sched.h + * see also find_task_by_vpid() set in include/linux/sched.h */ extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); extern struct pid *find_vpid(int nr); -extern struct pid *find_pid(int nr); /* * Lookup a PID in the hash table, and return with it's count elevated. diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index caff5283d15..1af82c4e17d 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -14,6 +14,8 @@ struct pidmap { #define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8) +struct bsd_acct_struct; + struct pid_namespace { struct kref kref; struct pidmap pidmap[PIDMAP_ENTRIES]; @@ -25,6 +27,9 @@ struct pid_namespace { #ifdef CONFIG_PROC_FS struct vfsmount *proc_mnt; #endif +#ifdef CONFIG_BSD_PROCESS_ACCT + struct bsd_acct_struct *bacct; +#endif }; extern struct pid_namespace init_pid_ns; @@ -85,4 +90,7 @@ static inline struct task_struct *task_child_reaper(struct task_struct *tsk) return tsk->nsproxy->pid_ns->child_reaper; } +void pidhash_init(void); +void pidmap_init(void); + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 4ad9de94449..4dcce54b6d7 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -22,78 +22,6 @@ #define _LINUX_PM_H #include <linux/list.h> -#include <asm/atomic.h> -#include <asm/errno.h> - -/* - * Power management requests... these are passed to pm_send_all() and friends. - * - * these functions are old and deprecated, see below. - */ -typedef int __bitwise pm_request_t; - -#define PM_SUSPEND ((__force pm_request_t) 1) /* enter D1-D3 */ -#define PM_RESUME ((__force pm_request_t) 2) /* enter D0 */ - - -/* - * Device types... these are passed to pm_register - */ -typedef int __bitwise pm_dev_t; - -#define PM_UNKNOWN_DEV ((__force pm_dev_t) 0) /* generic */ -#define PM_SYS_DEV ((__force pm_dev_t) 1) /* system device (fan, KB controller, ...) 
*/ -#define PM_PCI_DEV ((__force pm_dev_t) 2) /* PCI device */ -#define PM_USB_DEV ((__force pm_dev_t) 3) /* USB device */ -#define PM_SCSI_DEV ((__force pm_dev_t) 4) /* SCSI device */ -#define PM_ISA_DEV ((__force pm_dev_t) 5) /* ISA device */ -#define PM_MTD_DEV ((__force pm_dev_t) 6) /* Memory Technology Device */ - -/* - * System device hardware ID (PnP) values - */ -enum -{ - PM_SYS_UNKNOWN = 0x00000000, /* generic */ - PM_SYS_KBC = 0x41d00303, /* keyboard controller */ - PM_SYS_COM = 0x41d00500, /* serial port */ - PM_SYS_IRDA = 0x41d00510, /* IRDA controller */ - PM_SYS_FDC = 0x41d00700, /* floppy controller */ - PM_SYS_VGA = 0x41d00900, /* VGA controller */ - PM_SYS_PCMCIA = 0x41d00e00, /* PCMCIA controller */ -}; - -/* - * Device identifier - */ -#define PM_PCI_ID(dev) ((dev)->bus->number << 16 | (dev)->devfn) - -/* - * Request handler callback - */ -struct pm_dev; - -typedef int (*pm_callback)(struct pm_dev *dev, pm_request_t rqst, void *data); - -/* - * Dynamic device information - */ -struct pm_dev -{ - pm_dev_t type; - unsigned long id; - pm_callback callback; - void *data; - - unsigned long flags; - unsigned long state; - unsigned long prev_state; - - struct list_head entry; -}; - -/* Functions above this comment are list-based old-style power - * management. Please avoid using them. */ /* * Callbacks for platform drivers to implement. @@ -317,6 +245,21 @@ struct pm_ext_ops { * RECOVER Creation of a hibernation image or restoration of the main * memory contents from a hibernation image has failed, call * ->thaw() and ->complete() for all devices. + * + * The following PM_EVENT_ messages are defined for internal use by + * kernel subsystems. They are never issued by the PM core. + * + * USER_SUSPEND Manual selective suspend was issued by userspace. + * + * USER_RESUME Manual selective resume was issued by userspace. + * + * REMOTE_WAKEUP Remote-wakeup request was received from the device. + * + * AUTO_SUSPEND Automatic (device idle) runtime suspend was + * initiated by the subsystem. + * + * AUTO_RESUME Automatic (device needed) runtime resume was + * requested by a driver. 
*/ #define PM_EVENT_ON 0x0000 @@ -328,9 +271,18 @@ struct pm_ext_ops { #define PM_EVENT_THAW 0x0020 #define PM_EVENT_RESTORE 0x0040 #define PM_EVENT_RECOVER 0x0080 +#define PM_EVENT_USER 0x0100 +#define PM_EVENT_REMOTE 0x0200 +#define PM_EVENT_AUTO 0x0400 -#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) +#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) +#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) #define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) #define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) #define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) @@ -339,7 +291,16 @@ struct pm_ext_ops { #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) -#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) +#define PMSG_USER_SUSPEND ((struct pm_messge) \ + { .event = PM_EVENT_USER_SUSPEND, }) +#define PMSG_USER_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_USER_RESUME, }) +#define PMSG_REMOTE_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_REMOTE_RESUME, }) +#define PMSG_AUTO_SUSPEND ((struct pm_messge) \ + { .event = PM_EVENT_AUTO_SUSPEND, }) +#define PMSG_AUTO_RESUME ((struct pm_messge) \ + { .event = PM_EVENT_AUTO_RESUME, }) /** * Device power management states diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h deleted file mode 100644 index 446f4f42b95..00000000000 --- a/include/linux/pm_legacy.h +++ /dev/null @@ -1,35 +0,0 @@ -#ifndef __LINUX_PM_LEGACY_H__ -#define __LINUX_PM_LEGACY_H__ - - -#ifdef CONFIG_PM_LEGACY - -/* - * Register a device with power management - */ -struct pm_dev __deprecated * -pm_register(pm_dev_t type, unsigned long id, pm_callback callback); - -/* - * Send a request to all devices - */ -int __deprecated pm_send_all(pm_request_t rqst, void *data); - -#else /* CONFIG_PM_LEGACY */ - -static inline struct pm_dev *pm_register(pm_dev_t type, - unsigned long id, - pm_callback callback) -{ - return NULL; -} - -static inline int pm_send_all(pm_request_t rqst, void *data) -{ - return 0; -} - -#endif /* CONFIG_PM_LEGACY */ - -#endif /* __LINUX_PM_LEGACY_H__ */ - diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h index 2e4e97bd19f..d74f75ed1e4 100644 --- a/include/linux/pm_qos_params.h +++ b/include/linux/pm_qos_params.h @@ -1,6 +1,6 @@ /* interface for the pm_qos_power infrastructure of the linux kernel. 
* - * Mark Gross + * Mark Gross <mgross@linux.intel.com> */ #include <linux/list.h> #include <linux/notifier.h> diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 68ed19ccf1f..ea96ead1d39 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -78,6 +78,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CHARGE_EMPTY, POWER_SUPPLY_PROP_CHARGE_NOW, POWER_SUPPLY_PROP_CHARGE_AVG, + POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 15a9eaf4a80..fb61850d1cf 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -79,6 +79,7 @@ struct proc_dir_entry { int pde_users; /* number of callers into module in progress */ spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ struct completion *pde_unload_completion; + struct list_head pde_openers; /* who did ->open, but not ->release */ }; struct kcore_list { @@ -138,7 +139,6 @@ extern int proc_readdir(struct file *, void *, filldir_t); extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); extern const struct file_operations proc_kcore_operations; -extern const struct file_operations proc_kmsg_operations; extern const struct file_operations ppc_htab_operations; extern int pid_ns_prepare_proc(struct pid_namespace *ns); @@ -282,11 +282,16 @@ union proc_op { struct task_struct *task); }; +struct ctl_table_header; +struct ctl_table; + struct proc_inode { struct pid *pid; int fd; union proc_op op; struct proc_dir_entry *pde; + struct ctl_table_header *sysctl; + struct ctl_table *sysctl_entry; struct inode vfs_inode; }; diff --git a/include/linux/profile.h b/include/linux/profile.h index 05c1cc73693..7e7087239af 100644 --- a/include/linux/profile.h +++ b/include/linux/profile.h @@ -8,8 +8,6 @@ #include <asm/errno.h> -extern int prof_on __read_mostly; - #define CPU_PROFILING 1 #define SCHED_PROFILING 2 #define SLEEP_PROFILING 3 @@ -19,14 +17,31 @@ struct proc_dir_entry; struct pt_regs; struct notifier_block; +#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) +void create_prof_cpu_mask(struct proc_dir_entry *de); +#else +static inline void create_prof_cpu_mask(struct proc_dir_entry *de) +{ +} +#endif + +enum profile_type { + PROFILE_TASK_EXIT, + PROFILE_MUNMAP +}; + +#ifdef CONFIG_PROFILING + +extern int prof_on __read_mostly; + /* init basic kernel profiler */ void __init profile_init(void); -void profile_tick(int); +void profile_tick(int type); /* * Add multiple profiler hits to a given address: */ -void profile_hits(int, void *ip, unsigned int nr_hits); +void profile_hits(int type, void *ip, unsigned int nr_hits); /* * Single profiler hit: @@ -40,19 +55,6 @@ static inline void profile_hit(int type, void *ip) profile_hits(type, ip, 1); } -#ifdef CONFIG_PROC_FS -void create_prof_cpu_mask(struct proc_dir_entry *); -#else -#define create_prof_cpu_mask(x) do { (void)(x); } while (0) -#endif - -enum profile_type { - PROFILE_TASK_EXIT, - PROFILE_MUNMAP -}; - -#ifdef CONFIG_PROFILING - struct task_struct; struct mm_struct; @@ -80,6 +82,28 @@ struct pt_regs; #else +#define prof_on 0 + +static inline void profile_init(void) +{ + return; +} + +static inline void profile_tick(int type) +{ + return; +} + +static inline void profile_hits(int type, void *ip, unsigned int nr_hits) +{ + return; +} + +static inline void profile_hit(int type, void *ip) +{ + return; +} + static inline int 
task_handoff_register(struct notifier_block * n) { return -ENOSYS; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c6f5f9dd0ce..ea7416c901d 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -121,6 +121,74 @@ static inline void ptrace_unlink(struct task_struct *child) int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data); int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); +/** + * task_ptrace - return %PT_* flags that apply to a task + * @task: pointer to &task_struct in question + * + * Returns the %PT_* flags that apply to @task. + */ +static inline int task_ptrace(struct task_struct *task) +{ + return task->ptrace; +} + +/** + * ptrace_event - possibly stop for a ptrace event notification + * @mask: %PT_* bit to check in @current->ptrace + * @event: %PTRACE_EVENT_* value to report if @mask is set + * @message: value for %PTRACE_GETEVENTMSG to return + * + * This checks the @mask bit to see if ptrace wants stops for this event. + * If so we stop, reporting @event and @message to the ptrace parent. + * + * Returns nonzero if we did a ptrace notification, zero if not. + * + * Called without locks. + */ +static inline int ptrace_event(int mask, int event, unsigned long message) +{ + if (mask && likely(!(current->ptrace & mask))) + return 0; + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + return 1; +} + +/** + * ptrace_init_task - initialize ptrace state for a new child + * @child: new child task + * @ptrace: true if child should be ptrace'd by parent's tracer + * + * This is called immediately after adding @child to its parent's children + * list. @ptrace is false in the normal case, and true to ptrace @child. + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. + */ +static inline void ptrace_init_task(struct task_struct *child, bool ptrace) +{ + INIT_LIST_HEAD(&child->ptrace_entry); + INIT_LIST_HEAD(&child->ptraced); + child->parent = child->real_parent; + child->ptrace = 0; + if (unlikely(ptrace)) { + child->ptrace = current->ptrace; + ptrace_link(child, current->parent); + } +} + +/** + * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped + * @task: task in %EXIT_DEAD state + * + * Called with write_lock(&tasklist_lock) held. 
+ */ +static inline void ptrace_release_task(struct task_struct *task) +{ + BUG_ON(!list_empty(&task->ptraced)); + ptrace_unlink(task); + BUG_ON(!list_empty(&task->ptrace_entry)); +} + #ifndef force_successful_syscall_return /* * System call handlers that, upon successful completion, need to return a @@ -246,6 +314,10 @@ static inline void user_enable_block_step(struct task_struct *task) #define arch_ptrace_stop(code, info) do { } while (0) #endif +extern int task_current_syscall(struct task_struct *target, long *callno, + unsigned long args[6], unsigned int maxargs, + unsigned long *sp, unsigned long *pc); + #endif #endif diff --git a/include/linux/quota.h b/include/linux/quota.h index dcddfb20094..376a05048bc 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -41,9 +41,6 @@ #define __DQUOT_VERSION__ "dquot_6.5.1" #define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 -typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ -typedef __u64 qsize_t; /* Type in which we store sizes */ - /* Size of blocks in which are counted size limits */ #define QUOTABLOCK_BITS 10 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) @@ -138,6 +135,10 @@ struct if_dqinfo { #define QUOTA_NL_BHARDWARN 4 /* Block hardlimit reached */ #define QUOTA_NL_BSOFTLONGWARN 5 /* Block grace time expired */ #define QUOTA_NL_BSOFTWARN 6 /* Block softlimit reached */ +#define QUOTA_NL_IHARDBELOW 7 /* Usage got below inode hardlimit */ +#define QUOTA_NL_ISOFTBELOW 8 /* Usage got below inode softlimit */ +#define QUOTA_NL_BHARDBELOW 9 /* Usage got below block hardlimit */ +#define QUOTA_NL_BSOFTBELOW 10 /* Usage got below block softlimit */ enum { QUOTA_NL_C_UNSPEC, @@ -172,6 +173,9 @@ enum { #include <asm/atomic.h> +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef __u64 qsize_t; /* Type in which we store sizes */ + extern spinlock_t dq_data_lock; /* Maximal numbers of writes for quota operation (insert/delete/update) @@ -223,12 +227,10 @@ struct super_block; #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */ extern void mark_info_dirty(struct super_block *sb, int type); -#define info_dirty(info) test_bit(DQF_INFO_DIRTY_B, &(info)->dqi_flags) -#define info_any_dquot_dirty(info) (!list_empty(&(info)->dqi_dirty_list)) -#define info_any_dirty(info) (info_dirty(info) || info_any_dquot_dirty(info)) - -#define sb_dqopt(sb) (&(sb)->s_dquot) -#define sb_dqinfo(sb, type) (sb_dqopt(sb)->info+(type)) +static inline int info_dirty(struct mem_dqinfo *info) +{ + return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags); +} struct dqstats { int lookups; @@ -337,19 +339,6 @@ struct quota_info { struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; -#define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \ - (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED)) - -#define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \ - sb_has_quota_enabled(sb, GRPQUOTA)) - -#define sb_has_quota_suspended(sb, type) \ - ((type) == USRQUOTA ? 
(sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \ - (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED)) - -#define sb_any_quota_suspended(sb) (sb_has_quota_suspended(sb, USRQUOTA) | \ - sb_has_quota_suspended(sb, GRPQUOTA)) - int register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index f8670205385..ca6b9b5c8d5 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -11,42 +11,87 @@ #define _LINUX_QUOTAOPS_ #include <linux/smp_lock.h> - #include <linux/fs.h> +static inline struct quota_info *sb_dqopt(struct super_block *sb) +{ + return &sb->s_dquot; +} + #if defined(CONFIG_QUOTA) /* * declaration of quota_function calls in kernel. */ -extern void sync_dquots(struct super_block *sb, int type); - -extern int dquot_initialize(struct inode *inode, int type); -extern int dquot_drop(struct inode *inode); - -extern int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); -extern int dquot_alloc_inode(const struct inode *inode, unsigned long number); - -extern int dquot_free_space(struct inode *inode, qsize_t number); -extern int dquot_free_inode(const struct inode *inode, unsigned long number); - -extern int dquot_transfer(struct inode *inode, struct iattr *iattr); -extern int dquot_commit(struct dquot *dquot); -extern int dquot_acquire(struct dquot *dquot); -extern int dquot_release(struct dquot *dquot); -extern int dquot_commit_info(struct super_block *sb, int type); -extern int dquot_mark_dquot_dirty(struct dquot *dquot); - -extern int vfs_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount); -extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, - int format_id, int type); -extern int vfs_quota_off(struct super_block *sb, int type, int remount); -extern int vfs_quota_sync(struct super_block *sb, int type); -extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -extern int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); -extern int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); +void sync_dquots(struct super_block *sb, int type); + +int dquot_initialize(struct inode *inode, int type); +int dquot_drop(struct inode *inode); + +int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); +int dquot_alloc_inode(const struct inode *inode, unsigned long number); + +int dquot_free_space(struct inode *inode, qsize_t number); +int dquot_free_inode(const struct inode *inode, unsigned long number); + +int dquot_transfer(struct inode *inode, struct iattr *iattr); +int dquot_commit(struct dquot *dquot); +int dquot_acquire(struct dquot *dquot); +int dquot_release(struct dquot *dquot); +int dquot_commit_info(struct super_block *sb, int type); +int dquot_mark_dquot_dirty(struct dquot *dquot); + +int vfs_quota_on(struct super_block *sb, int type, int format_id, + char *path, int remount); +int vfs_quota_on_path(struct super_block *sb, int type, int format_id, + struct path *path); +int vfs_quota_on_mount(struct super_block *sb, char *qf_name, + int format_id, int type); +int vfs_quota_off(struct super_block *sb, int type, int remount); +int vfs_quota_sync(struct super_block *sb, int type); +int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int vfs_set_dqinfo(struct super_block *sb, int type, struct 
if_dqinfo *ii); +int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); +int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di); + +void vfs_dq_drop(struct inode *inode); +int vfs_dq_transfer(struct inode *inode, struct iattr *iattr); +int vfs_dq_quota_on_remount(struct super_block *sb); + +static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) +{ + return sb_dqopt(sb)->info + type; +} + +/* + * Functions for checking status of quota + */ + +static inline int sb_has_quota_enabled(struct super_block *sb, int type) +{ + if (type == USRQUOTA) + return sb_dqopt(sb)->flags & DQUOT_USR_ENABLED; + return sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED; +} + +static inline int sb_any_quota_enabled(struct super_block *sb) +{ + return sb_has_quota_enabled(sb, USRQUOTA) || + sb_has_quota_enabled(sb, GRPQUOTA); +} + +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + if (type == USRQUOTA) + return sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED; + return sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return sb_has_quota_suspended(sb, USRQUOTA) || + sb_has_quota_suspended(sb, GRPQUOTA); +} /* * Operations supported for diskquotas. @@ -59,38 +104,16 @@ extern struct quotactl_ops vfs_quotactl_ops; /* It is better to call this function outside of any transaction as it might * need a lot of space in journal for dquot structure allocation. */ -static inline void DQUOT_INIT(struct inode *inode) +static inline void vfs_dq_init(struct inode *inode) { BUG_ON(!inode->i_sb); if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) inode->i_sb->dq_op->initialize(inode, -1); } -/* The same as with DQUOT_INIT */ -static inline void DQUOT_DROP(struct inode *inode) -{ - /* Here we can get arbitrary inode from clear_inode() so we have - * to be careful. OTOH we don't need locking as quota operations - * are allowed to change only at mount time */ - if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op - && inode->i_sb->dq_op->drop) { - int cnt; - /* Test before calling to rule out calls from proc and such - * where we are not allowed to block. 
Note that this is - * actually reliable test even without the lock - the caller - * must assure that nobody can come after the DQUOT_DROP and - * add quota pointers back anyway */ - for (cnt = 0; cnt < MAXQUOTAS; cnt++) - if (inode->i_dquot[cnt] != NODQUOT) - break; - if (cnt < MAXQUOTAS) - inode->i_sb->dq_op->drop(inode); - } -} - /* The following allocation/freeing/transfer functions *must* be called inside * a transaction (deadlocks possible otherwise) */ -static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -102,15 +125,15 @@ static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) return 0; } -static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) + if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr))) mark_inode_dirty(inode); return ret; } -static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -122,25 +145,25 @@ static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) return 0; } -static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) { int ret; - if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) + if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr))) mark_inode_dirty(inode); return ret; } -static inline int DQUOT_ALLOC_INODE(struct inode *inode) +static inline int vfs_dq_alloc_inode(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) { - DQUOT_INIT(inode); + vfs_dq_init(inode); if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) return 1; } return 0; } -static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_space(inode, nr); @@ -148,35 +171,25 @@ static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) inode_sub_bytes(inode, nr); } -static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr); + vfs_dq_free_space_nodirty(inode, nr); mark_inode_dirty(inode); } -static inline void DQUOT_FREE_INODE(struct inode *inode) +static inline void vfs_dq_free_inode(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_inode(inode, 1); } -static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) -{ - if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) { - DQUOT_INIT(inode); - if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) - return 1; - } - return 0; -} - /* The following two functions cannot be called inside a transaction */ -static inline void DQUOT_SYNC(struct super_block *sb) +static inline void vfs_dq_sync(struct super_block *sb) { sync_dquots(sb, -1); } -static inline int DQUOT_OFF(struct super_block *sb, int remount) +static inline int vfs_dq_off(struct super_block *sb, int remount) { int ret = -ENOSYS; @@ -185,22 +198,27 @@ static 
inline int DQUOT_OFF(struct super_block *sb, int remount) return ret; } -static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +#else + +static inline int sb_has_quota_enabled(struct super_block *sb, int type) { - int cnt; - int ret = 0, err; + return 0; +} - if (!sb->s_qcop || !sb->s_qcop->quota_on) - return -ENOSYS; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); - if (err < 0 && !ret) - ret = err; - } - return ret; +static inline int sb_any_quota_enabled(struct super_block *sb) +{ + return 0; } -#else +static inline int sb_has_quota_suspended(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_suspended(struct super_block *sb) +{ + return 0; +} /* * NO-OP when quota not configured. @@ -208,113 +226,144 @@ static inline int DQUOT_ON_REMOUNT(struct super_block *sb) #define sb_dquot_ops (NULL) #define sb_quotactl_ops (NULL) -static inline void DQUOT_INIT(struct inode *inode) +static inline void vfs_dq_init(struct inode *inode) { } -static inline void DQUOT_DROP(struct inode *inode) +static inline void vfs_dq_drop(struct inode *inode) { } -static inline int DQUOT_ALLOC_INODE(struct inode *inode) +static inline int vfs_dq_alloc_inode(struct inode *inode) { return 0; } -static inline void DQUOT_FREE_INODE(struct inode *inode) +static inline void vfs_dq_free_inode(struct inode *inode) { } -static inline void DQUOT_SYNC(struct super_block *sb) +static inline void vfs_dq_sync(struct super_block *sb) { } -static inline int DQUOT_OFF(struct super_block *sb, int remount) +static inline int vfs_dq_off(struct super_block *sb, int remount) { return 0; } -static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +static inline int vfs_dq_quota_on_remount(struct super_block *sb) { return 0; } -static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) +static inline int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) { return 0; } -static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { inode_add_bytes(inode, nr); return 0; } -static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) { - DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr); + vfs_dq_prealloc_space_nodirty(inode, nr); mark_inode_dirty(inode); return 0; } -static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { inode_add_bytes(inode, nr); return 0; } -static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) { - DQUOT_ALLOC_SPACE_NODIRTY(inode, nr); + vfs_dq_alloc_space_nodirty(inode, nr); mark_inode_dirty(inode); return 0; } -static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); } -static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr); + vfs_dq_free_space_nodirty(inode, nr); mark_inode_dirty(inode); } #endif /* CONFIG_QUOTA */ -static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) { - return 
DQUOT_PREALLOC_SPACE_NODIRTY(inode, + return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr) +static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) { - return DQUOT_PREALLOC_SPACE(inode, + return vfs_dq_prealloc_space(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) { - return DQUOT_ALLOC_SPACE_NODIRTY(inode, + return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr) +static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) { - return DQUOT_ALLOC_SPACE(inode, + return vfs_dq_alloc_space(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); } -static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr) +static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) { - DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); } +/* + * Define uppercase equivalents for compatibility with old function names + * Can go away when we think all users have been converted (15/04/2008) + */ +#define DQUOT_INIT(inode) vfs_dq_init(inode) +#define DQUOT_DROP(inode) vfs_dq_drop(inode) +#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \ + vfs_dq_prealloc_space_nodirty(inode, nr) +#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr) +#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \ + vfs_dq_alloc_space_nodirty(inode, nr) +#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr) +#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_prealloc_block_nodirty(inode, nr) +#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr) +#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_alloc_block_nodirty(inode, nr) +#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr) +#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode) +#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \ + vfs_dq_free_space_nodirty(inode, nr) +#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr) +#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \ + vfs_dq_free_block_nodirty(inode, nr) +#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr) +#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode) +#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr) +#define DQUOT_SYNC(sb) vfs_dq_sync(sb) +#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount) +#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb) + #endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index b8ce2b444bb..a916c6660df 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -99,12 +99,15 @@ do { \ * * The notable exceptions to this rule are the following functions: * radix_tree_lookup + * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup + * radix_tree_gang_lookup_slot * radix_tree_gang_lookup_tag + * radix_tree_gang_lookup_tag_slot * radix_tree_tagged 
* - * The first 4 functions are able to be called locklessly, using RCU. The + * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. @@ -159,6 +162,9 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); +unsigned int +radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); int radix_tree_preload(gfp_t gfp_mask); @@ -173,6 +179,10 @@ unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); +unsigned int +radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, + unsigned long first_index, unsigned int max_items, + unsigned int tag); int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); static inline void radix_tree_preload_end(void) diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 9f2549ac0e2..c200b9a34af 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -128,6 +128,7 @@ struct mddev_s #define MD_CHANGE_DEVS 0 /* Some device status has changed */ #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ #define MD_CHANGE_PENDING 2 /* superblock update in progress */ +#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */ int ro; diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h new file mode 100644 index 00000000000..18a5b9ba9d4 --- /dev/null +++ b/include/linux/ratelimit.h @@ -0,0 +1,27 @@ +#ifndef _LINUX_RATELIMIT_H +#define _LINUX_RATELIMIT_H +#include <linux/param.h> + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +struct ratelimit_state { + int interval; + int burst; + int printed; + int missed; + unsigned long begin; +}; + +#define DEFINE_RATELIMIT_STATE(name, interval, burst) \ + struct ratelimit_state name = {interval, burst,} + +extern int __ratelimit(struct ratelimit_state *rs); + +static inline int ratelimit(void) +{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + return __ratelimit(&rs); +} +#endif diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 8c774905dcf..4ab84362272 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu); #ifdef CONFIG_DEBUG_LOCK_ALLOC extern struct lockdep_map rcu_lock_map; # define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) #else # define rcu_read_acquire() do { } while (0) diff --git a/include/linux/rculist.h b/include/linux/rculist.h index b0f39be08b6..eb4443c7e05 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -98,6 +98,34 @@ static inline void list_del_rcu(struct list_head *entry) } /** + * hlist_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. 
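/*
 * Editorial sketch, not part of the diff: how a filesystem might use the
 * vfs_dq_*() helpers that quotaops.h now provides in place of the old
 * DQUOT_*() macros. The function name and error handling are hypothetical.
 */
static int example_alloc_blocks(struct inode *inode, qsize_t nr_blocks)
{
	int err = 0;

	vfs_dq_init(inode);			/* attach dquots if quota is enabled */
	if (vfs_dq_alloc_block(inode, nr_blocks))
		return -EDQUOT;			/* charge refused: over quota */

	/* ... perform the real on-disk allocation, setting err on failure ... */

	if (err)
		vfs_dq_free_block(inode, nr_blocks);	/* roll the charge back */
	return err;
}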
+ * + * Note: list_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_add_head_rcu() or + * hlist_del_rcu(), running on this same list. However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_for_each_entry_rcu(). + */ +static inline void hlist_del_init_rcu(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + n->pprev = NULL; + } +} + +/** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index f04b64eca63..0967f03b070 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -115,16 +115,21 @@ DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); static inline void rcu_enter_nohz(void) { + static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); + smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1); + WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); } static inline void rcu_exit_nohz(void) { + static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); + smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1)); + WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), + &rs); } #else /* CONFIG_NO_HZ */ diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h new file mode 100644 index 00000000000..e84b0a9feda --- /dev/null +++ b/include/linux/regulator/bq24022.h @@ -0,0 +1,21 @@ +/* + * Support for TI bq24022 (bqTINY-II) Dual Input (USB/AC Adpater) + * 1-Cell Li-Ion Charger connected via GPIOs. + * + * Copyright (c) 2008 Philipp Zabel + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/** + * bq24022_mach_info - platform data for bq24022 + * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging + * @gpio_iset2: GPIO line connected to the ISET2 pin, used to limit charging current to 100 mA / 500 mA + */ +struct bq24022_mach_info { + int gpio_nce; + int gpio_iset2; +}; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h new file mode 100644 index 00000000000..afdc4558bb9 --- /dev/null +++ b/include/linux/regulator/consumer.h @@ -0,0 +1,284 @@ +/* + * consumer.h -- SoC Regulator consumer support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
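/*
 * Editorial sketch, not part of the diff: typical use of the new
 * <linux/ratelimit.h> interface, in the same spirit as the
 * WARN_ON_RATELIMIT() conversion in rcupreempt.h above. __ratelimit()
 * returns nonzero while the burst budget for the current interval is not
 * yet exhausted; the function name below is hypothetical.
 */
static void example_report_overrun(void)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))
		printk(KERN_WARNING "example: buffer overrun detected\n");
}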
+ * + * Regulator Consumer Interface. + * + * A Power Management Regulator framework for SoC based devices. + * Features:- + * o Voltage and current level control. + * o Operating mode control. + * o Regulator status. + * o sysfs entries for showing client devices and status + * + * EXPERIMENTAL FEATURES: + * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators + * to use most efficient operating mode depending upon voltage and load and + * is transparent to client drivers. + * + * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during + * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when + * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA + * but this drops rapidly to 60% when below 100mA. Regulator r has > 90% + * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate + * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. + * + */ + +#ifndef __LINUX_REGULATOR_CONSUMER_H_ +#define __LINUX_REGULATOR_CONSUMER_H_ + +/* + * Regulator operating modes. + * + * Regulators can run in a variety of different operating modes depending on + * output load. This allows further system power savings by selecting the + * best (and most efficient) regulator mode for a desired load. + * + * Most drivers will only care about NORMAL. The modes below are generic and + * will probably not match the naming convention of your regulator data sheet + * but should match the use cases in the datasheet. + * + * In order of power efficiency (least efficient at top). + * + * Mode Description + * FAST Regulator can handle fast changes in it's load. + * e.g. useful in CPU voltage & frequency scaling where + * load can quickly increase with CPU frequency increases. + * + * NORMAL Normal regulator power supply mode. Most drivers will + * use this mode. + * + * IDLE Regulator runs in a more efficient mode for light + * loads. Can be used for devices that have a low power + * requirement during periods of inactivity. This mode + * may be more noisy than NORMAL and may not be able + * to handle fast load switching. + * + * STANDBY Regulator runs in the most efficient mode for very + * light loads. Can be used by devices when they are + * in a sleep/standby state. This mode is likely to be + * the most noisy and may not be able to handle fast load + * switching. + * + * NOTE: Most regulators will only support a subset of these modes. Some + * will only just support NORMAL. + * + * These modes can be OR'ed together to make up a mask of valid register modes. + */ + +#define REGULATOR_MODE_FAST 0x1 +#define REGULATOR_MODE_NORMAL 0x2 +#define REGULATOR_MODE_IDLE 0x4 +#define REGULATOR_MODE_STANDBY 0x8 + +/* + * Regulator notifier events. + * + * UNDER_VOLTAGE Regulator output is under voltage. + * OVER_CURRENT Regulator output current is too high. + * REGULATION_OUT Regulator output is out of regulation. + * FAIL Regulator output has failed. + * OVER_TEMP Regulator over temp. + * FORCE_DISABLE Regulator shut down by software. + * + * NOTE: These events can be OR'ed together when passed into handler. + */ + +#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01 +#define REGULATOR_EVENT_OVER_CURRENT 0x02 +#define REGULATOR_EVENT_REGULATION_OUT 0x04 +#define REGULATOR_EVENT_FAIL 0x08 +#define REGULATOR_EVENT_OVER_TEMP 0x10 +#define REGULATOR_EVENT_FORCE_DISABLE 0x20 + +struct regulator; + +/** + * struct regulator_bulk_data - Data used for bulk regulator operations. + * + * @supply The name of the supply. 
Initialised by the user before + * using the bulk regulator APIs. + * @consumer The regulator consumer for the supply. This will be managed + * by the bulk API. + * + * The regulator APIs provide a series of regulator_bulk_() API calls as + * a convenience to consumers which require multiple supplies. This + * structure is used to manage data for these calls. + */ +struct regulator_bulk_data { + const char *supply; + struct regulator *consumer; +}; + +#if defined(CONFIG_REGULATOR) + +/* regulator get and put */ +struct regulator *__must_check regulator_get(struct device *dev, + const char *id); +void regulator_put(struct regulator *regulator); + +/* regulator output control and status */ +int regulator_enable(struct regulator *regulator); +int regulator_disable(struct regulator *regulator); +int regulator_force_disable(struct regulator *regulator); +int regulator_is_enabled(struct regulator *regulator); + +int regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); +int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers); +void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers); + +int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); +int regulator_get_voltage(struct regulator *regulator); +int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA); +int regulator_get_current_limit(struct regulator *regulator); + +int regulator_set_mode(struct regulator *regulator, unsigned int mode); +unsigned int regulator_get_mode(struct regulator *regulator); +int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); + +/* regulator notifier block */ +int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb); +int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb); + +/* driver data - core doesn't touch */ +void *regulator_get_drvdata(struct regulator *regulator); +void regulator_set_drvdata(struct regulator *regulator, void *data); + +#else + +/* + * Make sure client drivers will still build on systems with no software + * controllable voltage or current regulators. + */ +static inline struct regulator *__must_check regulator_get(struct device *dev, + const char *id) +{ + /* Nothing except the stubbed out regulator API should be + * looking at the value except to check if it is an error + * value so the actual return value doesn't matter. 
+ */ + return (struct regulator *)id; +} +static inline void regulator_put(struct regulator *regulator) +{ +} + +static inline int regulator_enable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_disable(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_is_enabled(struct regulator *regulator) +{ + return 1; +} + +static inline int regulator_bulk_get(struct device *dev, + int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline int regulator_bulk_disable(int num_consumers, + struct regulator_bulk_data *consumers) +{ + return 0; +} + +static inline void regulator_bulk_free(int num_consumers, + struct regulator_bulk_data *consumers) +{ +} + +static inline int regulator_set_voltage(struct regulator *regulator, + int min_uV, int max_uV) +{ + return 0; +} + +static inline int regulator_get_voltage(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_current_limit(struct regulator *regulator, + int min_uA, int max_uA) +{ + return 0; +} + +static inline int regulator_get_current_limit(struct regulator *regulator) +{ + return 0; +} + +static inline int regulator_set_mode(struct regulator *regulator, + unsigned int mode) +{ + return 0; +} + +static inline unsigned int regulator_get_mode(struct regulator *regulator) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return REGULATOR_MODE_NORMAL; +} + +static inline int regulator_register_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline int regulator_unregister_notifier(struct regulator *regulator, + struct notifier_block *nb) +{ + return 0; +} + +static inline void *regulator_get_drvdata(struct regulator *regulator) +{ + return NULL; +} + +static inline void regulator_set_drvdata(struct regulator *regulator, + void *data) +{ +} + +#endif + +#endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h new file mode 100644 index 00000000000..1d712c7172a --- /dev/null +++ b/include/linux/regulator/driver.h @@ -0,0 +1,99 @@ +/* + * driver.h -- SoC Regulator driver support. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Driver Interface. + */ + +#ifndef __LINUX_REGULATOR_DRIVER_H_ +#define __LINUX_REGULATOR_DRIVER_H_ + +#include <linux/device.h> +#include <linux/regulator/consumer.h> + +struct regulator_constraints; +struct regulator_dev; + +/** + * struct regulator_ops - regulator operations. + * + * This struct describes regulator operations. 
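/*
 * Editorial sketch, not part of the diff: how a client driver might use the
 * consumer interface declared in <linux/regulator/consumer.h> above. The
 * supply name "vcc_sdio" and the probe function are hypothetical; the error
 * check assumes regulator_get() hands back an ERR_PTR value on failure, as
 * the stubbed-out comment implies.
 */
static int example_probe(struct device *dev)
{
	struct regulator *vcc;
	int ret;

	vcc = regulator_get(dev, "vcc_sdio");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);

	ret = regulator_set_voltage(vcc, 3300000, 3300000);	/* 3.3 V */
	if (!ret)
		ret = regulator_enable(vcc);
	if (ret)
		regulator_put(vcc);
	return ret;
}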
+ */ +struct regulator_ops { + + /* get/set regulator voltage */ + int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); + int (*get_voltage) (struct regulator_dev *); + + /* get/set regulator current */ + int (*set_current_limit) (struct regulator_dev *, + int min_uA, int max_uA); + int (*get_current_limit) (struct regulator_dev *); + + /* enable/disable regulator */ + int (*enable) (struct regulator_dev *); + int (*disable) (struct regulator_dev *); + int (*is_enabled) (struct regulator_dev *); + + /* get/set regulator operating mode (defined in regulator.h) */ + int (*set_mode) (struct regulator_dev *, unsigned int mode); + unsigned int (*get_mode) (struct regulator_dev *); + + /* get most efficient regulator operating mode for load */ + unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, + int output_uV, int load_uA); + + /* the operations below are for configuration of regulator state when + * it's parent PMIC enters a global STANBY/HIBERNATE state */ + + /* set regulator suspend voltage */ + int (*set_suspend_voltage) (struct regulator_dev *, int uV); + + /* enable/disable regulator in suspend state */ + int (*set_suspend_enable) (struct regulator_dev *); + int (*set_suspend_disable) (struct regulator_dev *); + + /* set regulator suspend operating mode (defined in regulator.h) */ + int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); +}; + +/* + * Regulators can either control voltage or current. + */ +enum regulator_type { + REGULATOR_VOLTAGE, + REGULATOR_CURRENT, +}; + +/** + * struct regulator_desc - Regulator descriptor + * + */ +struct regulator_desc { + const char *name; + int id; + struct regulator_ops *ops; + int irq; + enum regulator_type type; + struct module *owner; +}; + + +struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, + void *reg_data); +void regulator_unregister(struct regulator_dev *rdev); + +int regulator_notifier_call_chain(struct regulator_dev *rdev, + unsigned long event, void *data); + +void *rdev_get_drvdata(struct regulator_dev *rdev); +int rdev_get_id(struct regulator_dev *rdev); + +#endif diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h new file mode 100644 index 00000000000..1387a5d2190 --- /dev/null +++ b/include/linux/regulator/fixed.h @@ -0,0 +1,22 @@ +/* + * fixed.h + * + * Copyright 2008 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#ifndef __REGULATOR_FIXED_H +#define __REGULATOR_FIXED_H + +struct fixed_voltage_config { + const char *supply_name; + int microvolts; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h new file mode 100644 index 00000000000..11e737dbfcf --- /dev/null +++ b/include/linux/regulator/machine.h @@ -0,0 +1,104 @@ +/* + * machine.h -- SoC Regulator support, machine/board driver API. + * + * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. + * + * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Regulator Machine/Board Interface. 
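/*
 * Editorial sketch, not part of the diff: minimal registration against the
 * driver interface above. The ops, names and private data are placeholders
 * for a real PMIC driver.
 */
static int example_ldo_enable(struct regulator_dev *rdev)     { return 0; }
static int example_ldo_disable(struct regulator_dev *rdev)    { return 0; }
static int example_ldo_is_enabled(struct regulator_dev *rdev) { return 1; }

static struct regulator_ops example_ldo_ops = {
	.enable		= example_ldo_enable,
	.disable	= example_ldo_disable,
	.is_enabled	= example_ldo_is_enabled,
};

static struct regulator_desc example_ldo_desc = {
	.name	= "EXAMPLE_LDO1",
	.id	= 0,
	.ops	= &example_ldo_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};

/* in the PMIC driver's probe():
 *	struct regulator_dev *rdev =
 *		regulator_register(&example_ldo_desc, my_pmic_data);
 */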
+ */ + +#ifndef __LINUX_REGULATOR_MACHINE_H_ +#define __LINUX_REGULATOR_MACHINE_H_ + +#include <linux/regulator/consumer.h> +#include <linux/suspend.h> + +struct regulator; + +/* + * Regulator operation constraint flags. These flags are used to enable + * certain regulator operations and can be OR'ed together. + * + * VOLTAGE: Regulator output voltage can be changed by software on this + * board/machine. + * CURRENT: Regulator output current can be changed by software on this + * board/machine. + * MODE: Regulator operating mode can be changed by software on this + * board/machine. + * STATUS: Regulator can be enabled and disabled. + * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + */ + +#define REGULATOR_CHANGE_VOLTAGE 0x1 +#define REGULATOR_CHANGE_CURRENT 0x2 +#define REGULATOR_CHANGE_MODE 0x4 +#define REGULATOR_CHANGE_STATUS 0x8 +#define REGULATOR_CHANGE_DRMS 0x10 + +/** + * struct regulator_state - regulator state during low power syatem states + * + * This describes a regulators state during a system wide low power state. + */ +struct regulator_state { + int uV; /* suspend voltage */ + unsigned int mode; /* suspend regulator operating mode */ + int enabled; /* is regulator enabled in this suspend state */ +}; + +/** + * struct regulation_constraints - regulator operating constraints. + * + * This struct describes regulator and board/machine specific constraints. + */ +struct regulation_constraints { + + char *name; + + /* voltage output range (inclusive) - for voltage control */ + int min_uV; + int max_uV; + + /* current output range (inclusive) - for current control */ + int min_uA; + int max_uA; + + /* valid regulator operating modes for this machine */ + unsigned int valid_modes_mask; + + /* valid operations for regulator on this machine */ + unsigned int valid_ops_mask; + + /* regulator input voltage - only if supply is another regulator */ + int input_uV; + + /* regulator suspend states for global PMIC STANDBY/HIBERNATE */ + struct regulator_state state_disk; + struct regulator_state state_mem; + struct regulator_state state_standby; + suspend_state_t initial_state; /* suspend state to set at init */ + + /* constriant flags */ + unsigned always_on:1; /* regulator never off when system is on */ + unsigned boot_on:1; /* bootloader/firmware enabled regulator */ + unsigned apply_uV:1; /* apply uV constraint iff min == max */ +}; + +int regulator_set_supply(const char *regulator, const char *regulator_supply); + +const char *regulator_get_supply(const char *regulator); + +int regulator_set_machine_constraints(const char *regulator, + struct regulation_constraints *constraints); + +int regulator_set_device_supply(const char *regulator, struct device *dev, + const char *supply); + +int regulator_suspend_prepare(suspend_state_t state); + +#endif diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 4aacaeecb56..e9963af16cd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -526,8 +526,8 @@ struct item_head { ** p is the array of __u32, i is the index into the array, v is the value ** to store there. 
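/*
 * Editorial sketch, not part of the diff: board code wiring a regulator to a
 * consumer device through the machine interface above. The regulator and
 * supply names are hypothetical.
 */
static struct regulation_constraints example_ldo1_constraints = {
	.min_uV		= 3300000,
	.max_uV		= 3300000,
	.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	.apply_uV	= 1,
};

/* in the board's init code:
 *	regulator_set_machine_constraints("EXAMPLE_LDO1",
 *					  &example_ldo1_constraints);
 *	regulator_set_device_supply("EXAMPLE_LDO1", &sdio_dev.dev, "vcc_sdio");
 */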
*/ -#define get_block_num(p, i) le32_to_cpu(get_unaligned((p) + (i))) -#define put_block_num(p, i, v) put_unaligned(cpu_to_le32(v), (p) + (i)) +#define get_block_num(p, i) get_unaligned_le32((p) + (i)) +#define put_block_num(p, i, v) put_unaligned_le32((v), (p) + (i)) // // in old version uniqueness field shows key type diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 336ee43ed7d..315517e8bfa 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -152,7 +152,7 @@ struct reiserfs_journal_list { atomic_t j_nonzerolen; atomic_t j_commit_left; atomic_t j_older_commits_done; /* all commits older than this on disk */ - struct semaphore j_commit_lock; + struct mutex j_commit_mutex; unsigned long j_trans_id; time_t j_timestamp; struct reiserfs_list_bitmap *j_list_bitmap; @@ -193,8 +193,8 @@ struct reiserfs_journal { struct buffer_head *j_header_bh; time_t j_trans_start_time; /* time this transaction started */ - struct semaphore j_lock; - struct semaphore j_flush_sem; + struct mutex j_mutex; + struct mutex j_flush_mutex; wait_queue_head_t j_join_wait; /* wait for current transaction to finish before starting new one */ atomic_t j_jlock; /* lock for j_join_wait */ int j_list_bitmap_index; /* number of next list bitmap to use */ diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 66a96814d61..af135ae895d 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -55,7 +55,7 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name); int reiserfs_delete_xattrs(struct inode *inode); int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); int reiserfs_xattr_init(struct super_block *sb, int mount_flags); -int reiserfs_permission(struct inode *inode, int mask, struct nameidata *nd); +int reiserfs_permission(struct inode *inode, int mask); int reiserfs_xattr_del(struct inode *, const char *); int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t); diff --git a/include/linux/relay.h b/include/linux/relay.h index 6cd8c4425fc..953fc055e87 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -48,6 +48,7 @@ struct rchan_buf size_t *padding; /* padding counts per sub-buffer */ size_t prev_padding; /* temporary variable */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ + size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ } ____cacheline_aligned; @@ -68,6 +69,7 @@ struct rchan int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ + int has_base_filename; /* has a filename associated? 
*/ char base_filename[NAME_MAX]; /* saved base filename */ }; @@ -169,6 +171,9 @@ struct rchan *relay_open(const char *base_filename, size_t n_subbufs, struct rchan_callbacks *cb, void *private_data); +extern int relay_late_setup_files(struct rchan *chan, + const char *base_filename, + struct dentry *parent); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); extern void relay_subbufs_consumed(struct rchan *chan, diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 6d9e1fca098..fdeadd9740d 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -63,9 +63,14 @@ u64 res_counter_read_u64(struct res_counter *counter, int member); ssize_t res_counter_read(struct res_counter *counter, int member, const char __user *buf, size_t nbytes, loff_t *pos, int (*read_strategy)(unsigned long long val, char *s)); -ssize_t res_counter_write(struct res_counter *counter, int member, - const char __user *buf, size_t nbytes, loff_t *pos, - int (*write_strategy)(char *buf, unsigned long long *val)); + +typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val); + +int res_counter_memparse_write_strategy(const char *buf, + unsigned long long *res); + +int res_counter_write(struct res_counter *counter, int member, + const char *buffer, write_strategy_fn write_strategy); /* * the field descriptors. one for each member of res_counter @@ -95,8 +100,10 @@ void res_counter_init(struct res_counter *counter); * counter->limit _locked call expects the counter->lock to be taken */ -int res_counter_charge_locked(struct res_counter *counter, unsigned long val); -int res_counter_charge(struct res_counter *counter, unsigned long val); +int __must_check res_counter_charge_locked(struct res_counter *counter, + unsigned long val); +int __must_check res_counter_charge(struct res_counter *counter, + unsigned long val); /* * uncharge - tell that some portion of the resource is released @@ -151,4 +158,20 @@ static inline void res_counter_reset_failcnt(struct res_counter *cnt) cnt->failcnt = 0; spin_unlock_irqrestore(&cnt->lock, flags); } + +static inline int res_counter_set_limit(struct res_counter *cnt, + unsigned long long limit) +{ + unsigned long flags; + int ret = -EBUSY; + + spin_lock_irqsave(&cnt->lock, flags); + if (cnt->usage < limit) { + cnt->limit = limit; + ret = 0; + } + spin_unlock_irqrestore(&cnt->lock, flags); + return ret; +} + #endif diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index c5f6e54ec6a..741d1a62cc3 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -68,7 +68,8 @@ enum rfkill_state { * @user_claim_unsupported: Whether the hardware supports exclusive * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exlusively by userspace. - * @mutex: Guards switch state transitions + * @mutex: Guards switch state transitions. It serializes callbacks + * and also protects the state. * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. 
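/*
 * Editorial sketch, not part of the diff: using the res_counter interface
 * above. res_counter_charge() is now __must_check and fails once the limit
 * is hit; res_counter_set_limit() refuses (-EBUSY) to lower the limit below
 * current usage. The wrapper name is hypothetical, and res_counter_uncharge()
 * is declared elsewhere in res_counter.h.
 */
static int example_account(struct res_counter *counter, unsigned long bytes)
{
	if (res_counter_charge(counter, bytes))
		return -ENOMEM;		/* over limit, failcnt was bumped */

	/* ... use the resource ... */

	res_counter_uncharge(counter, bytes);
	return 0;
}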
@@ -89,12 +90,13 @@ struct rfkill { const char *name; enum rfkill_type type; - enum rfkill_state state; bool user_claim_unsupported; bool user_claim; + /* the mutex serializes callbacks and also protects + * the state */ struct mutex mutex; - + enum rfkill_state state; void *data; int (*toggle_radio)(void *data, enum rfkill_state state); int (*get_state)(void *data, enum rfkill_state *state); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1383692ac5b..69407f85e10 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -26,6 +26,14 @@ */ struct anon_vma { spinlock_t lock; /* Serialize access to vma list */ + /* + * NOTE: the LSB of the head.next is set by + * mm_take_all_locks() _after_ taking the above lock. So the + * head must only be read/written after taking the above lock + * to be sure to see a valid next pointer. The LSB bit itself + * is serialized by a system wide lock only visible to + * mm_take_all_locks() (mm_all_locks_mutex). + */ struct list_head head; /* List of private "related" vmas */ }; diff --git a/include/linux/rtc.h b/include/linux/rtc.h index f2d0d152772..91f597ad6ac 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -115,6 +115,23 @@ extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); extern struct class *rtc_class; +/* + * For these RTC methods the device parameter is the physical device + * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which + * was passed to rtc_device_register(). Its driver_data normally holds + * device state, including the rtc_device pointer for the RTC. + * + * Most of these methods are called with rtc_device.ops_lock held, + * through the rtc_*(struct rtc_device *, ...) calls. + * + * The (current) exceptions are mostly filesystem hooks: + * - the proc() hook for procfs + * - non-ioctl() chardev hooks: open(), release(), read_callback() + * - periodic irq calls: irq_set_state(), irq_set_freq() + * + * REVISIT those periodic irq calls *do* have ops_lock when they're + * issued through ioctl() ... + */ struct rtc_class_ops { int (*open)(struct device *); void (*release)(struct device *); @@ -208,8 +225,6 @@ typedef struct rtc_task { int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); -void rtc_get_rtc_time(struct rtc_time *rtc_tm); -irqreturn_t rtc_interrupt(int irq, void *dev_id); #endif /* __KERNEL__ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index f4d386c191f..ca643b13b02 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -755,13 +755,6 @@ extern void __rtnl_unlock(void); } \ } while(0) -#define BUG_TRAP(x) do { \ - if (unlikely(!(x))) { \ - printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \ - #x, __FILE__ , __LINE__); \ - } \ -} while(0) - static inline u32 rtm_get_table(struct rtattr **rta, u8 table) { return RTA_GET_U32(rta[RTA_TABLE-1]); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 71fc8136004..e5996984ddd 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -224,4 +224,42 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, */ #define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist)) + +/* + * Mapping sg iterator + * + * Iterates over sg entries mapping page-by-page. On each successful + * iteration, @miter->page points to the mapped page and + * @miter->length bytes of data can be accessed at @miter->addr. 
As + * long as an interation is enclosed between start and stop, the user + * is free to choose control structure and when to stop. + * + * @miter->consumed is set to @miter->length on each iteration. It + * can be adjusted if the user can't consume all the bytes in one go. + * Also, a stopped iteration can be resumed by calling next on it. + * This is useful when iteration needs to release all resources and + * continue later (e.g. at the next interrupt). + */ + +#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ + +struct sg_mapping_iter { + /* the following three fields can be accessed directly */ + struct page *page; /* currently mapped page */ + void *addr; /* pointer to the mapped area */ + size_t length; /* length of the mapped area */ + size_t consumed; /* number of consumed bytes */ + + /* these are internal states, keep away */ + struct scatterlist *__sg; /* current entry */ + unsigned int __nents; /* nr of remaining entries */ + unsigned int __offset; /* offset within sg */ + unsigned int __flags; +}; + +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags); +bool sg_miter_next(struct sg_mapping_iter *miter); +void sg_miter_stop(struct sg_mapping_iter *miter); + #endif /* _LINUX_SCATTERLIST_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1941d8b5cf1..cfb0d87b99f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -87,6 +87,7 @@ struct sched_param { #include <linux/task_io_accounting.h> #include <linux/kobject.h> #include <linux/latencytop.h> +#include <linux/cred.h> #include <asm/processor.h> @@ -292,13 +293,13 @@ extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); -extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern unsigned long softlockup_thresh; +extern unsigned int softlockup_panic; extern unsigned long sysctl_hung_task_check_count; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_warnings; +extern int softlockup_thresh; #else static inline void softlockup_tick(void) { @@ -505,6 +506,7 @@ struct signal_struct { unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; + struct task_io_accounting ioac; /* * Cumulative ns of scheduled CPU time for dead threads in the @@ -667,6 +669,10 @@ struct task_delay_info { /* io operations performed */ u32 swapin_count; /* total count of the number of swapin block */ /* io operations performed */ + + struct timespec freepages_start, freepages_end; + u64 freepages_delay; /* wait for memory reclaim */ + u32 freepages_count; /* total count of memory reclaim */ }; #endif /* CONFIG_TASK_DELAY_ACCT */ @@ -824,7 +830,16 @@ extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new); extern int arch_reinit_sched_domains(void); -#endif /* CONFIG_SMP */ +#else /* CONFIG_SMP */ + +struct sched_domain_attr; + +static inline void +partition_sched_domains(int ndoms_new, cpumask_t *doms_new, + struct sched_domain_attr *dattr_new) +{ +} +#endif /* !CONFIG_SMP */ struct io_context; /* See blkdev.h */ #define NGROUPS_SMALL 32 @@ -1239,15 +1254,11 @@ struct task_struct { unsigned long ptrace_message; siginfo_t *last_siginfo; /* For ptrace use. 
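/*
 * Editorial sketch, not part of the diff: the intended usage pattern for the
 * sg mapping iterator declared in scatterlist.h above -- here zeroing every
 * byte covered by a scatterlist, mapping page by page from atomic context.
 */
static void example_zero_sglist(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
	while (sg_miter_next(&miter)) {
		memset(miter.addr, 0, miter.length);
		/* miter.consumed defaults to miter.length */
	}
	sg_miter_stop(&miter);
}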
*/ -#ifdef CONFIG_TASK_XACCT -/* i/o counters(bytes read/written, #syscalls */ - u64 rchar, wchar, syscr, syscw; -#endif struct task_io_accounting ioac; #if defined(CONFIG_TASK_XACCT) u64 acct_rss_mem1; /* accumulated rss usage */ u64 acct_vm_mem1; /* accumulated virtual memory usage */ - cputime_t acct_stimexpd;/* stime since last update */ + cputime_t acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS nodemask_t mems_allowed; @@ -1486,7 +1497,7 @@ static inline void put_task_struct(struct task_struct *t) #define PF_KSWAPD 0x00040000 /* I am kswapd */ #define PF_SWAPOFF 0x00080000 /* I am in swapoff */ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ -#define PF_BORROWED_MM 0x00200000 /* I am a kthread doing use_mm */ +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ @@ -1541,16 +1552,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) extern unsigned long long sched_clock(void); -#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK -static inline void sched_clock_init(void) -{ -} - -static inline u64 sched_clock_cpu(int cpu) -{ - return sched_clock(); -} +extern void sched_clock_init(void); +extern u64 sched_clock_cpu(int cpu); +#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK static inline void sched_clock_tick(void) { } @@ -1562,28 +1567,11 @@ static inline void sched_clock_idle_sleep_event(void) static inline void sched_clock_idle_wakeup_event(u64 delta_ns) { } - -#ifdef CONFIG_NO_HZ -static inline void sched_clock_tick_stop(int cpu) -{ -} - -static inline void sched_clock_tick_start(int cpu) -{ -} -#endif - -#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ -extern void sched_clock_init(void); -extern u64 sched_clock_cpu(int cpu); +#else extern void sched_clock_tick(void); extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); -#ifdef CONFIG_NO_HZ -extern void sched_clock_tick_stop(int cpu); -extern void sched_clock_tick_start(int cpu); #endif -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu @@ -1705,19 +1693,13 @@ extern struct pid_namespace init_pid_ns; * finds a task by its pid in the specified namespace * find_task_by_vpid(): * finds a task by its virtual pid - * find_task_by_pid(): - * finds a task by its global pid * - * see also find_pid() etc in include/linux/pid.h + * see also find_vpid() etc in include/linux/pid.h */ extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, struct pid_namespace *ns); -static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr) -{ - return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns); -} extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); @@ -1785,12 +1767,11 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern void do_notify_parent(struct task_struct *, int); +extern int do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); 
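/*
 * Editorial sketch, not part of the diff: with find_task_by_pid() removed,
 * callers look a task up by its virtual pid in the current namespace.
 * A minimal lookup, assuming the usual rcu_read_lock() protection; the
 * helper name is hypothetical.
 */
static struct task_struct *example_get_task(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* pin it before dropping the RCU lock */
	rcu_read_unlock();
	return p;			/* caller drops it with put_task_struct() */
}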
extern int send_sig(int, struct task_struct *, int); extern void zap_other_threads(struct task_struct *p); -extern int kill_proc(pid_t, int, int); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); @@ -1872,9 +1853,13 @@ extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP -extern void wait_task_inactive(struct task_struct * p); +extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else -#define wait_task_inactive(p) do { } while (0) +static inline unsigned long wait_task_inactive(struct task_struct *p, + long match_state) +{ + return 1; +} #endif #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) @@ -1973,6 +1958,13 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif +static inline int object_is_on_stack(void *obj) +{ + void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); +} + extern void thread_info_cache_init(void); /* set thread flags in other task's structures @@ -2037,9 +2029,6 @@ static inline int signal_pending_state(long state, struct task_struct *p) if (!signal_pending(p)) return 0; - if (state & (__TASK_STOPPED | __TASK_TRACED)) - return 0; - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } @@ -2124,16 +2113,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ -#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT extern void arch_pick_mmap_layout(struct mm_struct *mm); -#else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) -{ - mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; -} -#endif #ifdef CONFIG_TRACING extern void @@ -2181,22 +2161,22 @@ extern long sched_group_rt_period(struct task_group *tg); #ifdef CONFIG_TASK_XACCT static inline void add_rchar(struct task_struct *tsk, ssize_t amt) { - tsk->rchar += amt; + tsk->ioac.rchar += amt; } static inline void add_wchar(struct task_struct *tsk, ssize_t amt) { - tsk->wchar += amt; + tsk->ioac.wchar += amt; } static inline void inc_syscr(struct task_struct *tsk) { - tsk->syscr++; + tsk->ioac.syscr++; } static inline void inc_syscw(struct task_struct *tsk) { - tsk->syscw++; + tsk->ioac.syscw++; } #else static inline void add_rchar(struct task_struct *tsk, ssize_t amt) @@ -2216,14 +2196,6 @@ static inline void inc_syscw(struct task_struct *tsk) } #endif -#ifdef CONFIG_SMP -void migration_init(void); -#else -static inline void migration_init(void) -{ -} -#endif - #ifndef TASK_SIZE_OF #define TASK_SIZE_OF(tsk) TASK_SIZE #endif diff --git a/include/linux/security.h b/include/linux/security.h index 31c8851ec5d..fd96e7f8a6f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -102,9 +102,7 @@ extern unsigned long mmap_min_addr; #define LSM_SETID_FS 8 /* forward declares to avoid warnings */ -struct nfsctl_arg; struct sched_param; -struct swap_info_struct; struct request_sock; /* bprm_apply_creds unsafe reasons */ @@ -1364,7 +1362,7 @@ struct security_operations { struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink) (struct dentry *dentry); int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); - int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd); + int (*inode_permission) 
(struct inode *inode, int mask); int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); void (*inode_delete) (struct inode *inode); @@ -1630,7 +1628,7 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry); int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); -int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd); +int security_inode_permission(struct inode *inode, int mask); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); void security_inode_delete(struct inode *inode); @@ -2023,8 +2021,7 @@ static inline int security_inode_follow_link(struct dentry *dentry, return 0; } -static inline int security_inode_permission(struct inode *inode, int mask, - struct nameidata *nd) +static inline int security_inode_permission(struct inode *inode, int mask) { return 0; } diff --git a/include/linux/sem.h b/include/linux/sem.h index c8eaad9e4b7..1b191c176bc 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -78,6 +78,7 @@ struct seminfo { #ifdef __KERNEL__ #include <asm/atomic.h> +#include <linux/rcupdate.h> struct task_struct; @@ -93,23 +94,19 @@ struct sem_array { time_t sem_otime; /* last semop time */ time_t sem_ctime; /* last change time */ struct sem *sem_base; /* ptr to first semaphore in array */ - struct sem_queue *sem_pending; /* pending operations to be processed */ - struct sem_queue **sem_pending_last; /* last pending operation */ - struct sem_undo *undo; /* undo requests on this array */ + struct list_head sem_pending; /* pending operations to be processed */ + struct list_head list_id; /* undo requests on this array */ unsigned long sem_nsems; /* no. of semaphores in array */ }; /* One queue for each sleeping process in the system. */ struct sem_queue { - struct sem_queue * next; /* next entry in the queue */ - struct sem_queue ** prev; /* previous entry in the queue, *(q->prev) == q */ - struct task_struct* sleeper; /* this process */ - struct sem_undo * undo; /* undo structure */ + struct list_head list; /* queue of pending operations */ + struct task_struct *sleeper; /* this process */ + struct sem_undo *undo; /* undo structure */ int pid; /* process id of requesting process */ int status; /* completion status of operation */ - struct sem_array * sma; /* semaphore array for operations */ - int id; /* internal sem id */ - struct sembuf * sops; /* array of pending operations */ + struct sembuf *sops; /* array of pending operations */ int nsops; /* number of operations */ int alter; /* does the operation alter the array? */ }; @@ -118,8 +115,11 @@ struct sem_queue { * when the process exits. */ struct sem_undo { - struct sem_undo * proc_next; /* next entry on this process */ - struct sem_undo * id_next; /* next entry on this semaphore set */ + struct list_head list_proc; /* per-process list: all undos from one process. 
*/ + /* rcu protected */ + struct rcu_head rcu; /* rcu struct for sem_undo() */ + struct sem_undo_list *ulp; /* sem_undo_list for the process */ + struct list_head list_id; /* per semaphore array list: all undos for one array */ int semid; /* semaphore set identifier */ short * semadj; /* array of adjustments, one per semaphore */ }; @@ -128,9 +128,9 @@ struct sem_undo { * that may be shared among all a CLONE_SYSVSEM task group. */ struct sem_undo_list { - atomic_t refcnt; - spinlock_t lock; - struct sem_undo *proc_list; + atomic_t refcnt; + spinlock_t lock; + struct list_head list_proc; }; struct sysv_sem { diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 9cae64b00d6..7415839ac89 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h @@ -26,10 +26,8 @@ struct semaphore { .wait_list = LIST_HEAD_INIT((name).wait_list), \ } -#define __DECLARE_SEMAPHORE_GENERIC(name, count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) - -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) +#define DECLARE_MUTEX(name) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) static inline void sema_init(struct semaphore *sem, int val) { diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index a66304a0995..a1783b229ef 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,8 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/mutex.h> +#include <linux/cpumask.h> +#include <linux/nodemask.h> struct seq_operations; struct file; @@ -47,6 +49,16 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); +int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +{ + return seq_bitmap(m, mask->bits, NR_CPUS); +} + +static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) +{ + return seq_bitmap(m, mask->bits, MAX_NUMNODES); +} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index f3a1c0e4502..3b2f6c04855 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -59,9 +59,6 @@ #define PORT_SUNZILOG 38 #define PORT_SUNSAB 39 -/* NEC v850. 
*/ -#define PORT_V850E_UART 40 - /* DEC */ #define PORT_DZ 46 #define PORT_ZS 47 diff --git a/include/linux/serio.h b/include/linux/serio.h index e72716cca57..25641d9e0ea 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -87,11 +87,10 @@ void serio_unregister_port(struct serio *serio); void serio_unregister_child_port(struct serio *serio); int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name); -static inline int serio_register_driver(struct serio_driver *drv) +static inline int __must_check serio_register_driver(struct serio_driver *drv) { return __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME); } -int serio_register_driver(struct serio_driver *drv); void serio_unregister_driver(struct serio_driver *drv); static inline int serio_write(struct serio *serio, unsigned char data) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f2d12d5a21b..fd83f2584b1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -43,7 +43,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) } #ifdef CONFIG_TMPFS_POSIX_ACL -int shmem_permission(struct inode *, int, struct nameidata *); +int shmem_permission(struct inode *, int); int shmem_acl_init(struct inode *, struct inode *); void shmem_acl_destroy_inode(struct inode *); diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index ea037f28df9..bef0c46d471 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,6 +8,12 @@ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> + +/* Flags for signalfd4. */ +#define SFD_CLOEXEC O_CLOEXEC +#define SFD_NONBLOCK O_NONBLOCK struct signalfd_siginfo { __u32 ssi_signo; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7ea44f6621f..358661c9990 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -243,6 +243,7 @@ typedef unsigned char *sk_buff_data_t; * @tc_index: Traffic control index * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) + * @do_not_encrypt: set to prevent encryption of this frame * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -316,7 +317,10 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - /* 14 bit hole */ +#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) + __u8 do_not_encrypt:1; +#endif + /* 0/13/14 bit hole */ #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; @@ -897,7 +901,7 @@ extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) { if (len > skb_headlen(skb) && - !__pskb_pull_tail(skb, len-skb_headlen(skb))) + !__pskb_pull_tail(skb, len - skb_headlen(skb))) return NULL; skb->len -= len; return skb->data += len; @@ -914,7 +918,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) return 1; if (unlikely(len > skb->len)) return 0; - return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL; + return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; } /** @@ -1317,7 +1321,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) unsigned int size = skb->len; if (likely(size >= len)) return 0; - return skb_pad(skb, len-size); + return skb_pad(skb, len - size); } static inline int skb_add_data(struct sk_buff *skb, diff --git a/include/linux/slab.h b/include/linux/slab.h 
index 9aa90a6f20e..5ff9676c1e2 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -58,7 +58,7 @@ int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, - void (*)(struct kmem_cache *, void *)); + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); @@ -96,6 +96,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); /* * Common kmalloc functions provided by all allocators */ +void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); size_t ksize(const void *); @@ -180,7 +181,7 @@ size_t ksize(const void *); */ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) { - if (n != 0 && size > ULONG_MAX / n) + if (size != 0 && n > ULONG_MAX / size) return NULL; return __kmalloc(n * size, flags | __GFP_ZERO); } diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d117ea2825a..2f5c16b1aac 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -46,6 +46,7 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; + unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; @@ -85,7 +86,7 @@ struct kmem_cache { struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *); int inuse; /* Offset to metadata */ int align; /* Alignment */ const char *name; /* Name (only for display!) */ diff --git a/include/linux/sm501.h b/include/linux/sm501.h index 95c1c39ba44..214f93209b8 100644 --- a/include/linux/sm501.h +++ b/include/linux/sm501.h @@ -46,24 +46,6 @@ extern unsigned long sm501_modify_reg(struct device *dev, unsigned long set, unsigned long clear); -/* sm501_gpio_set - * - * set the state of the given GPIO line -*/ - -extern void sm501_gpio_set(struct device *dev, - unsigned long gpio, - unsigned int to, - unsigned int dir); - -/* sm501_gpio_get - * - * get the state of the given GPIO line -*/ - -extern unsigned long sm501_gpio_get(struct device *dev, - unsigned long gpio); - /* Platform data definitions */ @@ -73,6 +55,8 @@ extern unsigned long sm501_gpio_get(struct device *dev, #define SM501FB_FLAG_USE_HWACCEL (1<<3) #define SM501FB_FLAG_PANEL_NO_FPEN (1<<4) #define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5) +#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6) +#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7) struct sm501_platdata_fbsub { struct fb_videomode *def_mode; @@ -102,11 +86,19 @@ struct sm501_platdata_fb { struct sm501_platdata_fbsub *fb_pnl; }; -/* gpio i2c */ +/* gpio i2c + * + * Note, we have to pass in the bus number, as the number used will be + * passed to the i2c-gpio driver's platform_device.id, subsequently used + * to register the i2c bus. 
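/*
 * The constructor passed to kmem_cache_create() now takes only the object
 * pointer; the struct kmem_cache * argument is gone.  A minimal sketch of
 * the new prototype in use (demo_* names are illustrative):
 */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

struct demo_obj {
	int refcount;
};

static void demo_ctor(void *obj)
{
	struct demo_obj *d = obj;

	d->refcount = 0;
}

static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
	demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
				       0, SLAB_HWCACHE_ALIGN, demo_ctor);
	return demo_cache ? 0 : -ENOMEM;
}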
+*/ struct sm501_platdata_gpio_i2c { + unsigned int bus_num; unsigned int pin_sda; unsigned int pin_scl; + int udelay; + int timeout; }; /* sm501_initdata @@ -129,6 +121,7 @@ struct sm501_reg_init { #define SM501_USE_FBACCEL (1<<6) #define SM501_USE_AC97 (1<<7) #define SM501_USE_I2S (1<<8) +#define SM501_USE_GPIO (1<<9) #define SM501_USE_ALL (0xffffffff) @@ -155,6 +148,8 @@ struct sm501_init_gpio { struct sm501_reg_init gpio_ddr_high; }; +#define SM501_FLAG_SUSPEND_OFF (1<<4) + /* sm501_platdata * * This is passed with the platform device to allow the board @@ -168,6 +163,12 @@ struct sm501_platdata { struct sm501_init_gpio *init_gpiop; struct sm501_platdata_fb *fb; + int flags; + int gpio_base; + + int (*get_power)(struct device *dev); + int (*set_power)(struct device *dev, unsigned int on); + struct sm501_platdata_gpio_i2c *gpio_i2c; unsigned int gpio_i2c_nr; }; diff --git a/include/linux/smb_fs.h b/include/linux/smb_fs.h index 2c5cd55f44f..923cd8a247b 100644 --- a/include/linux/smb_fs.h +++ b/include/linux/smb_fs.h @@ -43,18 +43,13 @@ static inline struct smb_inode_info *SMB_I(struct inode *inode) } /* macro names are short for word, double-word, long value (?) */ -#define WVAL(buf,pos) \ - (le16_to_cpu(get_unaligned((__le16 *)((u8 *)(buf) + (pos))))) -#define DVAL(buf,pos) \ - (le32_to_cpu(get_unaligned((__le32 *)((u8 *)(buf) + (pos))))) -#define LVAL(buf,pos) \ - (le64_to_cpu(get_unaligned((__le64 *)((u8 *)(buf) + (pos))))) -#define WSET(buf,pos,val) \ - put_unaligned(cpu_to_le16((u16)(val)), (__le16 *)((u8 *)(buf) + (pos))) -#define DSET(buf,pos,val) \ - put_unaligned(cpu_to_le32((u32)(val)), (__le32 *)((u8 *)(buf) + (pos))) -#define LSET(buf,pos,val) \ - put_unaligned(cpu_to_le64((u64)(val)), (__le64 *)((u8 *)(buf) + (pos))) +#define WVAL(buf, pos) (get_unaligned_le16((u8 *)(buf) + (pos))) +#define DVAL(buf, pos) (get_unaligned_le32((u8 *)(buf) + (pos))) +#define LVAL(buf, pos) (get_unaligned_le64((u8 *)(buf) + (pos))) + +#define WSET(buf, pos, val) put_unaligned_le16((val), (u8 *)(buf) + (pos)) +#define DSET(buf, pos, val) put_unaligned_le32((val), (u8 *)(buf) + (pos)) +#define LSET(buf, pos, val) put_unaligned_le64((val), (u8 *)(buf) + (pos)) /* where to find the base of the SMB packet proper */ #define smb_base(buf) ((u8 *)(((u8 *)(buf))+4)) diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h index 8e0556b8781..3827b922ba1 100644 --- a/include/linux/smc91x.h +++ b/include/linux/smc91x.h @@ -5,9 +5,19 @@ #define SMC91X_USE_16BIT (1 << 1) #define SMC91X_USE_32BIT (1 << 2) +#define SMC91X_NOWAIT (1 << 3) + +/* two bits for IO_SHIFT, let's hope later designs will keep this sane */ +#define SMC91X_IO_SHIFT_0 (0 << 4) +#define SMC91X_IO_SHIFT_1 (1 << 4) +#define SMC91X_IO_SHIFT_2 (2 << 4) +#define SMC91X_IO_SHIFT_3 (3 << 4) +#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3) + +#define SMC91X_USE_DMA (1 << 6) + struct smc91x_platdata { unsigned long flags; - unsigned long irq_flags; /* IRQF_... 
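/*
 * The WVAL()/DVAL()/LVAL() rewrite above leans on the combined
 * get_unaligned_le{16,32,64}() helpers, which fold the little-endian byte
 * swap and the unaligned load into one call.  Open-coded equivalent for the
 * 16-bit case (smb_word_at is a hypothetical name, shown only as a sketch):
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static u16 smb_word_at(const void *buf, int pos)
{
	/* same as WVAL(buf, pos): unaligned 16-bit little-endian read */
	return get_unaligned_le16((const u8 *)buf + pos);
}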
*/ }; #endif /* __SMC91X_H__ */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 48262f86c96..66484d4a845 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -74,15 +74,10 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data); #ifdef CONFIG_USE_GENERIC_SMP_HELPERS void generic_smp_call_function_single_interrupt(void); void generic_smp_call_function_interrupt(void); -void init_call_single_data(void); void ipi_call_lock(void); void ipi_call_unlock(void); void ipi_call_lock_irq(void); void ipi_call_unlock_irq(void); -#else -static inline void init_call_single_data(void) -{ -} #endif /* diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 5df62ef1280..7a6e6bba4a7 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -214,6 +214,8 @@ enum LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ + LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ + LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ __LINUX_MIB_MAX }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 950af631e7f..dc5086fe773 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -189,7 +189,8 @@ struct ucred { #define AF_BLUETOOTH 31 /* Bluetooth sockets */ #define AF_IUCV 32 /* IUCV sockets */ #define AF_RXRPC 33 /* RxRPC sockets */ -#define AF_MAX 34 /* For now.. */ +#define AF_ISDN 34 /* mISDN sockets */ +#define AF_MAX 35 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -225,6 +226,7 @@ struct ucred { #define PF_BLUETOOTH AF_BLUETOOTH #define PF_IUCV AF_IUCV #define PF_RXRPC AF_RXRPC +#define PF_ISDN AF_ISDN #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h new file mode 100644 index 00000000000..287ec830eab --- /dev/null +++ b/include/linux/spi/ds1305.h @@ -0,0 +1,35 @@ +#ifndef __LINUX_SPI_DS1305_H +#define __LINUX_SPI_DS1305_H + +/* + * One-time configuration for ds1305 and ds1306 RTC chips. + * + * Put a pointer to this in spi_board_info.platform_data if you want to + * be sure that Linux (re)initializes this as needed ... after losing + * backup power, and potentially on the first boot. + */ +struct ds1305_platform_data { + + /* Trickle charge configuration: it's OK to leave out the MAGIC + * bitmask; mask in either DS1 or DS2, and then one of 2K/4k/8K. + */ +#define DS1305_TRICKLE_MAGIC 0xa0 +#define DS1305_TRICKLE_DS2 0x08 /* two diodes */ +#define DS1305_TRICKLE_DS1 0x04 /* one diode */ +#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */ +#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */ +#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */ + u8 trickle; + + /* set only on ds1306 parts */ + bool is_ds1306; + + /* ds1306 only: enable 1 Hz output */ + bool en_1hz; + + /* REVISIT: the driver currently expects nINT0 to be wired + * as the alarm IRQ. ALM1 may also need to be set up ... + */ +}; + +#endif /* __LINUX_SPI_DS1305_H */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h index 835ddf47d45..22ef107d770 100644 --- a/include/linux/spi/mcp23s08.h +++ b/include/linux/spi/mcp23s08.h @@ -1,18 +1,25 @@ -/* FIXME driver should be able to handle all four slaves that - * can be hooked up to each chipselect, as well as IRQs... - */ +/* FIXME driver should be able to handle IRQs... 
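/*
 * Board-code sketch for the new ds1305 platform data defined above.  The
 * values are illustrative; a real board chooses its own trickle-charge
 * setting (or leaves .trickle at zero to keep charging disabled).
 */
#include <linux/types.h>
#include <linux/spi/ds1305.h>

static struct ds1305_platform_data demo_ds1306_pdata = {
	/* one series diode, 4 kOhm trickle-charge path */
	.trickle	= DS1305_TRICKLE_MAGIC | DS1305_TRICKLE_DS1 |
			  DS1305_TRICKLE_4K,
	.is_ds1306	= true,
	.en_1hz		= false,
};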
*/ + +struct mcp23s08_chip_info { + bool is_present; /* true iff populated */ + u8 pullups; /* BIT(x) means enable pullup x */ +}; struct mcp23s08_platform_data { - /* four slaves can share one SPI chipselect */ - u8 slave; + /* Four slaves (numbered 0..3) can share one SPI chipselect, and + * will provide 8..32 GPIOs using 1..4 gpio_chip instances. + */ + struct mcp23s08_chip_info chip[4]; - /* number assigned to the first GPIO */ + /* "base" is the number of the first GPIO. Dynamic assignment is + * not currently supported, and even if there are gaps in chip + * addressing the GPIO numbers are sequential .. so for example + * if only slaves 0 and 3 are present, their GPIOs range from + * base to base+15. + */ unsigned base; - /* pins with pullups */ - u8 pullups; - void *context; /* param to setup/teardown */ int (*setup)(struct spi_device *spi, diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h new file mode 100644 index 00000000000..b4d9fa6f797 --- /dev/null +++ b/include/linux/spi/orion_spi.h @@ -0,0 +1,17 @@ +/* + * orion_spi.h + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_SPI_ORION_SPI_H +#define __LINUX_SPI_ORION_SPI_H + +struct orion_spi_info { + u32 tclk; /* no <linux/clk.h> support yet */ +}; + + +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b9a76c97208..4be01bb4437 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -82,7 +82,7 @@ struct spi_device { int irq; void *controller_state; void *controller_data; - const char *modalias; + char modalias[32]; /* * likely need more hooks for more protocol options affecting how @@ -778,8 +778,20 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_master(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two + * stage registration sequence for each spi_device. This gives the caller + * some more control over the spi_device structure before it is registered, + * but requires that caller to initialize fields that would otherwise + * be defined using the board info. */ extern struct spi_device * +spi_alloc_device(struct spi_master *master); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); static inline void diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d311a090fae..e0c0fccced4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -46,6 +46,7 @@ * linux/spinlock.h: builds the final spin_*() APIs. 
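/*
 * Sketch of the two-stage spi_alloc_device()/spi_add_device() registration
 * described in the spi.h comment above: allocate, fill in fields that board
 * info cannot express, then add.  demo_add_spi_dev() and the "demo-chip"
 * modalias are illustrative, not from the patch.
 */
#include <linux/spi/spi.h>
#include <linux/string.h>

static struct spi_device *demo_add_spi_dev(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return NULL;

	/* modalias is now an embedded char[32], so copy rather than assign */
	strlcpy(spi->modalias, "demo-chip", sizeof(spi->modalias));
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;

	if (spi_add_device(spi)) {
		spi_dev_put(spi);
		return NULL;
	}
	return spi;
}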
*/ +#include <linux/typecheck.h> #include <linux/preempt.h> #include <linux/linkage.h> #include <linux/compiler.h> @@ -182,8 +183,14 @@ do { \ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) +# define spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ + _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) #else # define spin_lock_nested(lock, subclass) _spin_lock(lock) +# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) #endif #define write_lock(lock) _write_lock(lock) @@ -191,23 +198,53 @@ do { \ #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -#define spin_lock_irqsave(lock, flags) flags = _spin_lock_irqsave(lock) -#define read_lock_irqsave(lock, flags) flags = _read_lock_irqsave(lock) -#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock) +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave(lock); \ + } while (0) +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _read_lock_irqsave(lock); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _write_lock_irqsave(lock); \ + } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave_nested(lock, subclass) +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave_nested(lock, subclass); \ + } while (0) #else -#define spin_lock_irqsave_nested(lock, flags, subclass) \ - flags = _spin_lock_irqsave(lock) +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _spin_lock_irqsave(lock); \ + } while (0) #endif #else -#define spin_lock_irqsave(lock, flags) _spin_lock_irqsave(lock, flags) -#define read_lock_irqsave(lock, flags) _read_lock_irqsave(lock, flags) -#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags) +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _spin_lock_irqsave(lock, flags); \ + } while (0) +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_lock_irqsave(lock, flags); \ + } while (0) +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_lock_irqsave(lock, flags); \ + } while (0) #define spin_lock_irqsave_nested(lock, flags, subclass) \ spin_lock_irqsave(lock, flags) @@ -260,16 +297,25 @@ do { \ } while (0) #endif -#define spin_unlock_irqrestore(lock, flags) \ - _spin_unlock_irqrestore(lock, flags) +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _spin_unlock_irqrestore(lock, flags); \ + } while (0) #define spin_unlock_bh(lock) _spin_unlock_bh(lock) -#define read_unlock_irqrestore(lock, flags) \ - _read_unlock_irqrestore(lock, flags) +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _read_unlock_irqrestore(lock, flags); \ + } while (0) #define read_unlock_bh(lock) _read_unlock_bh(lock) -#define write_unlock_irqrestore(lock, flags) \ - _write_unlock_irqrestore(lock, flags) +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + _write_unlock_irqrestore(lock, flags); \ + } while (0) #define write_unlock_bh(lock) 
_write_unlock_bh(lock) #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8a2307ce729..d79845d034b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -22,6 +22,8 @@ int in_lock_functions(unsigned long addr); void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) + __acquires(lock); void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 4bf8cade9db..e530026eedf 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr) { switch (dev->bus->bustype) { case SSB_BUSTYPE_PCI: - return pci_dma_mapping_error(addr); + return pci_dma_mapping_error(dev->bus->host_pci, addr); case SSB_BUSTYPE_SSB: - return dma_mapping_error(addr); + return dma_mapping_error(dev->dev, addr); default: __ssb_dma_not_implemented(dev); } diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 5bfc553bdb2..f1cb0ba6d71 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -5,41 +5,43 @@ (and more). So the "read" side to such a lock is anything which diables preeempt. */ #include <linux/cpu.h> +#include <linux/cpumask.h> #include <asm/system.h> #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) + +/* Deprecated, but useful for transition. */ +#define ALL_CPUS ~0U + /** - * stop_machine_run: freeze the machine on all CPUs and run this function + * stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn() - * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This causes a thread to be scheduled on every other cpu, - * each of which disables interrupts, and finally interrupts are disabled - * on the current CPU. The result is that noone is holding a spinlock - * or inside any other preempt-disabled region when @fn() runs. + * Description: This causes a thread to be scheduled on every cpu, + * each of which disables interrupts. The result is that noone is + * holding a spinlock or inside any other preempt-disabled region when + * @fn() runs. * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu); +int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); /** - * __stop_machine_run: freeze the machine on all CPUs and run this function + * __stop_machine: freeze the machine on all CPUs and run this function * @fn: the function to run * @data: the data ptr for the @fn - * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS. + * @cpus: the cpus to run the @fn() on (NULL = any online cpu) * - * Description: This is a special version of the above, which returns the - * thread which has run @fn(): kthread_stop will return the return value - * of @fn(). Used by hotplug cpu. 
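/*
 * The spin_lock_irqsave()/spin_unlock_irqrestore() family above now wraps
 * the flags argument in typecheck(), so passing anything other than an
 * unsigned long warns at compile time.  Minimal correct usage:
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_irqsave(void)
{
	unsigned long flags;	/* must be unsigned long: the macros
				 * typecheck it at compile time */

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}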
+ * Description: This is a special version of the above, which assumes cpus + * won't come or go while it's being called. Used by hotplug cpu. */ -struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu); - +int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); #else -static inline int stop_machine_run(int (*fn)(void *), void *data, - unsigned int cpu) +static inline int stop_machine(int (*fn)(void *), void *data, + const cpumask_t *cpus) { int ret; local_irq_disable(); @@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data, return ret; } #endif /* CONFIG_SMP */ + +static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data, + unsigned int cpu) +{ + /* If they don't care which cpu fn runs on, just pick one. */ + if (cpu == NR_CPUS) + return stop_machine(fn, data, NULL); + else if (cpu == ~0U) + return stop_machine(fn, data, &cpu_possible_map); + else { + cpumask_t cpus = cpumask_of_cpu(cpu); + return stop_machine(fn, data, &cpus); + } +} #endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/string.h b/include/linux/string.h index efdc44593b5..810d80df0a1 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -111,5 +111,8 @@ extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); +extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, + const void *from, size_t available); + #endif #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index e8e69159af7..c6343509597 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -278,4 +278,6 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e) } #endif +extern struct mutex pm_mutex; + #endif /* _LINUX_SUSPEND_H */ diff --git a/include/linux/swab.h b/include/linux/swab.h new file mode 100644 index 00000000000..270d5c208a8 --- /dev/null +++ b/include/linux/swab.h @@ -0,0 +1,309 @@ +#ifndef _LINUX_SWAB_H +#define _LINUX_SWAB_H + +#include <linux/types.h> +#include <linux/compiler.h> +#include <asm/byteorder.h> + +/* + * casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
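/*
 * Sketch of the renamed interface above: stop_machine() takes a cpumask
 * pointer (NULL means "any online cpu") instead of the old single-cpu
 * argument to the now-deprecated stop_machine_run().  demo_* names are
 * illustrative.
 */
#include <linux/stop_machine.h>

static int demo_fn(void *data)
{
	/* runs while every CPU spins with interrupts disabled */
	return 0;
}

static int demo_run(void)
{
	return stop_machine(demo_fn, NULL, NULL);
}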
+ */ +#define __const_swab16(x) ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8))) + +#define __const_swab32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24))) + +#define __const_swab64(x) ((__u64)( \ + (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) + +#define __const_swahw32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ + (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) + +#define __const_swahb32(x) ((__u32)( \ + (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ + (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) + +/* + * Implement the following as inlines, but define the interface using + * macros to allow constant folding when possible: + * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 + */ + +static inline __attribute_const__ __u16 ___swab16(__u16 val) +{ +#ifdef __arch_swab16 + return __arch_swab16(val); +#elif defined(__arch_swab16p) + return __arch_swab16p(&val); +#else + return __const_swab16(val); +#endif +} + +static inline __attribute_const__ __u32 ___swab32(__u32 val) +{ +#ifdef __arch_swab32 + return __arch_swab32(val); +#elif defined(__arch_swab32p) + return __arch_swab32p(&val); +#else + return __const_swab32(val); +#endif +} + +static inline __attribute_const__ __u64 ___swab64(__u64 val) +{ +#ifdef __arch_swab64 + return __arch_swab64(val); +#elif defined(__arch_swab64p) + return __arch_swab64p(&val); +#elif defined(__SWAB_64_THRU_32__) + __u32 h = val >> 32; + __u32 l = val & ((1ULL << 32) - 1); + return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); +#else + return __const_swab64(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahw32(__u32 val) +{ +#ifdef __arch_swahw32 + return __arch_swahw32(val); +#elif defined(__arch_swahw32p) + return __arch_swahw32p(&val); +#else + return __const_swahw32(val); +#endif +} + +static inline __attribute_const__ __u32 ___swahb32(__u32 val) +{ +#ifdef __arch_swahb32 + return __arch_swahb32(val); +#elif defined(__arch_swahb32p) + return __arch_swahb32p(&val); +#else + return __const_swahb32(val); +#endif +} + +/** + * __swab16 - return a byteswapped 16-bit value + * @x: value to byteswap + */ +#define __swab16(x) \ + (__builtin_constant_p((__u16)(x)) ? \ + __const_swab16((x)) : \ + ___swab16((x))) + +/** + * __swab32 - return a byteswapped 32-bit value + * @x: value to byteswap + */ +#define __swab32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swab32((x)) : \ + ___swab32((x))) + +/** + * __swab64 - return a byteswapped 64-bit value + * @x: value to byteswap + */ +#define __swab64(x) \ + (__builtin_constant_p((__u64)(x)) ? \ + __const_swab64((x)) : \ + ___swab64((x))) + +/** + * __swahw32 - return a word-swapped 32-bit value + * @x: value to wordswap + * + * __swahw32(0x12340000) is 0x00001234 + */ +#define __swahw32(x) \ + (__builtin_constant_p((__u32)(x)) ? 
\ + __const_swahw32((x)) : \ + ___swahw32((x))) + +/** + * __swahb32 - return a high and low byte-swapped 32-bit value + * @x: value to byteswap + * + * __swahb32(0x12345678) is 0x34127856 + */ +#define __swahb32(x) \ + (__builtin_constant_p((__u32)(x)) ? \ + __const_swahb32((x)) : \ + ___swahb32((x))) + +/** + * __swab16p - return a byteswapped 16-bit value from a pointer + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline __u16 __swab16p(const __u16 *p) +{ +#ifdef __arch_swab16p + return __arch_swab16p(p); +#else + return __swab16(*p); +#endif +} + +/** + * __swab32p - return a byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline __u32 __swab32p(const __u32 *p) +{ +#ifdef __arch_swab32p + return __arch_swab32p(p); +#else + return __swab32(*p); +#endif +} + +/** + * __swab64p - return a byteswapped 64-bit value from a pointer + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline __u64 __swab64p(const __u64 *p) +{ +#ifdef __arch_swab64p + return __arch_swab64p(p); +#else + return __swab64(*p); +#endif +} + +/** + * __swahw32p - return a wordswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping. + */ +static inline __u32 __swahw32p(const __u32 *p) +{ +#ifdef __arch_swahw32p + return __arch_swahw32p(p); +#else + return __swahw32(*p); +#endif +} + +/** + * __swahb32p - return a high and low byteswapped 32-bit value from a pointer + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high/low byteswapping. + */ +static inline __u32 __swahb32p(const __u32 *p) +{ +#ifdef __arch_swahb32p + return __arch_swahb32p(p); +#else + return __swahb32(*p); +#endif +} + +/** + * __swab16s - byteswap a 16-bit value in-place + * @p: pointer to a naturally-aligned 16-bit value + */ +static inline void __swab16s(__u16 *p) +{ +#ifdef __arch_swab16s + __arch_swab16s(p); +#else + *p = __swab16p(p); +#endif +} +/** + * __swab32s - byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + */ +static inline void __swab32s(__u32 *p) +{ +#ifdef __arch_swab32s + __arch_swab32s(p); +#else + *p = __swab32p(p); +#endif +} + +/** + * __swab64s - byteswap a 64-bit value in-place + * @p: pointer to a naturally-aligned 64-bit value + */ +static inline void __swab64s(__u64 *p) +{ +#ifdef __arch_swab64s + __arch_swab64s(p); +#else + *p = __swab64p(p); +#endif +} + +/** + * __swahw32s - wordswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahw32() for details of wordswapping + */ +static inline void __swahw32s(__u32 *p) +{ +#ifdef __arch_swahw32s + __arch_swahw32s(p); +#else + *p = __swahw32p(p); +#endif +} + +/** + * __swahb32s - high and low byteswap a 32-bit value in-place + * @p: pointer to a naturally-aligned 32-bit value + * + * See __swahb32() for details of high and low byte swapping + */ +static inline void __swahb32s(__u32 *p) +{ +#ifdef __arch_swahb32s + __arch_swahb32s(p); +#else + *p = __swahb32p(p); +#endif +} + +#ifdef __KERNEL__ +# define swab16 __swab16 +# define swab32 __swab32 +# define swab64 __swab64 +# define swahw32 __swahw32 +# define swahb32 __swahb32 +# define swab16p __swab16p +# define swab32p __swab32p +# define swab64p __swab64p +# define swahw32p __swahw32p +# define swahb32p __swahb32p +# define swab16s __swab16s +# define swab32s __swab32s +# define swab64s __swab64s +# define swahw32s __swahw32s +# define 
swahb32s __swahb32s +#endif /* __KERNEL__ */ + +#endif /* _LINUX_SWAB_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 0b3377650c8..de40f169a4e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -237,7 +237,6 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, /* linux/mm/swapfile.c */ extern long total_swap_pages; -extern unsigned int nr_swapfiles; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); @@ -254,8 +253,6 @@ extern int can_share_swap_page(struct page *); extern int remove_exclusive_swap_page(struct page *); struct backing_dev_info; -extern spinlock_t swap_lock; - /* linux/mm/thrash.c */ extern struct mm_struct * swap_token_mm; extern void grab_swap_token(void); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 0522f368f9d..d6ff145919c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -305,6 +305,7 @@ asmlinkage long sys_fcntl64(unsigned int fd, #endif asmlinkage long sys_dup(unsigned int fildes); asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); +asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); @@ -409,6 +410,8 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname, asmlinkage long sys_bind(int, struct sockaddr __user *, int); asmlinkage long sys_connect(int, struct sockaddr __user *, int); asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); +asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *, + const __user sigset_t *, size_t, int); asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); @@ -428,6 +431,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds, asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp); asmlinkage long sys_epoll_create(int size); +asmlinkage long sys_epoll_create1(int flags); asmlinkage long sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event); asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events, @@ -443,7 +447,7 @@ asmlinkage long sys_newuname(struct new_utsname __user *name); asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim); -#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64) || defined(CONFIG_V850)) +#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64)) asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim); #endif asmlinkage long sys_setrlimit(unsigned int resource, @@ -543,6 +547,7 @@ asmlinkage long sys_get_mempolicy(int __user *policy, unsigned long addr, unsigned long flags); asmlinkage long sys_inotify_init(void); +asmlinkage long sys_inotify_init1(int flags); asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask); asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); @@ -608,12 +613,14 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, size_t len); asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache); asmlinkage long sys_signalfd(int ufd, sigset_t __user 
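/*
 * Quick illustration of the consolidated byte-swap helpers above:
 * swab16/swab32/swab64 unconditionally reverse byte order, constant
 * arguments fold at compile time via __builtin_constant_p(), and the
 * *s variants swap in place.  demo_swab() is illustrative only.
 */
#include <linux/swab.h>

static void demo_swab(void)
{
	u32 swapped = swab32(0x12345678);	/* 0x78563412, compile-time */
	u16 buf_word = 0xabcd;

	swab16s(&buf_word);			/* in place: now 0xcdab */
	(void)swapped;
}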
*user_mask, size_t sizemask); +asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags); asmlinkage long sys_timerfd_create(int clockid, int flags); asmlinkage long sys_timerfd_settime(int ufd, int flags, const struct itimerspec __user *utmr, struct itimerspec __user *otmr); asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr); asmlinkage long sys_eventfd(unsigned int count); +asmlinkage long sys_eventfd2(unsigned int count, int flags); asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 24141b4d1a1..d0437f36921 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -947,6 +947,22 @@ struct ctl_table; struct nsproxy; struct ctl_table_root; +struct ctl_table_set { + struct list_head list; + struct ctl_table_set *parent; + int (*is_seen)(struct ctl_table_set *); +}; + +extern void setup_sysctl_set(struct ctl_table_set *p, + struct ctl_table_set *parent, + int (*is_seen)(struct ctl_table_set *)); + +struct ctl_table_header; + +extern void sysctl_head_get(struct ctl_table_header *); +extern void sysctl_head_put(struct ctl_table_header *); +extern int sysctl_is_seen(struct ctl_table_header *); +extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *); extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, struct ctl_table_header *prev); @@ -1049,8 +1065,8 @@ struct ctl_table struct ctl_table_root { struct list_head root_list; - struct list_head header_list; - struct list_head *(*lookup)(struct ctl_table_root *root, + struct ctl_table_set default_set; + struct ctl_table_set *(*lookup)(struct ctl_table_root *root, struct nsproxy *namespaces); int (*permissions)(struct ctl_table_root *root, struct nsproxy *namespaces, struct ctl_table *table); @@ -1063,9 +1079,14 @@ struct ctl_table_header struct ctl_table *ctl_table; struct list_head ctl_entry; int used; + int count; struct completion *unregistering; struct ctl_table *ctl_table_arg; struct ctl_table_root *root; + struct ctl_table_set *set; + struct ctl_table *attached_by; + struct ctl_table *attached_to; + struct ctl_table_header *parent; }; /* struct ctl_path describes where in the hierarchy a table is added */ diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 44d00e9ccee..5e88afc9a2f 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -8,8 +8,19 @@ * Blame akpm@osdl.org for all this. */ -#ifdef CONFIG_TASK_IO_ACCOUNTING struct task_io_accounting { +#ifdef CONFIG_TASK_XACCT + /* bytes read */ + u64 rchar; + /* bytes written */ + u64 wchar; + /* # of read syscalls */ + u64 syscr; + /* # of write syscalls */ + u64 syscw; +#endif /* CONFIG_TASK_XACCT */ + +#ifdef CONFIG_TASK_IO_ACCOUNTING /* * The number of bytes which this task has caused to be read from * storage. @@ -30,8 +41,5 @@ struct task_io_accounting { * information loss in doing that. 
*/ u64 cancelled_write_bytes; +#endif /* CONFIG_TASK_IO_ACCOUNTING */ }; -#else -struct task_io_accounting { -}; -#endif diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h index ff46c6fad79..4d090f9ee60 100644 --- a/include/linux/task_io_accounting_ops.h +++ b/include/linux/task_io_accounting_ops.h @@ -40,9 +40,17 @@ static inline void task_io_account_cancelled_write(size_t bytes) current->ioac.cancelled_write_bytes += bytes; } -static inline void task_io_accounting_init(struct task_struct *tsk) +static inline void task_io_accounting_init(struct task_io_accounting *ioac) { - memset(&tsk->ioac, 0, sizeof(tsk->ioac)); + memset(ioac, 0, sizeof(*ioac)); +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->read_bytes += src->read_bytes; + dst->write_bytes += src->write_bytes; + dst->cancelled_write_bytes += src->cancelled_write_bytes; } #else @@ -69,9 +77,37 @@ static inline void task_io_account_cancelled_write(size_t bytes) { } -static inline void task_io_accounting_init(struct task_struct *tsk) +static inline void task_io_accounting_init(struct task_io_accounting *ioac) +{ +} + +static inline void task_blk_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) { } -#endif /* CONFIG_TASK_IO_ACCOUNTING */ -#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */ +#endif /* CONFIG_TASK_IO_ACCOUNTING */ + +#ifdef CONFIG_TASK_XACCT +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + dst->rchar += src->rchar; + dst->wchar += src->wchar; + dst->syscr += src->syscr; + dst->syscw += src->syscw; +} +#else +static inline void task_chr_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ +} +#endif /* CONFIG_TASK_XACCT */ + +static inline void task_io_accounting_add(struct task_io_accounting *dst, + struct task_io_accounting *src) +{ + task_chr_io_accounting_add(dst, src); + task_blk_io_accounting_add(dst, src); +} +#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */ diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h index 5d69c0744ff..18269e956a7 100644 --- a/include/linux/taskstats.h +++ b/include/linux/taskstats.h @@ -31,7 +31,7 @@ */ -#define TASKSTATS_VERSION 6 +#define TASKSTATS_VERSION 7 #define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN * in linux/sched.h */ @@ -157,6 +157,10 @@ struct taskstats { __u64 ac_utimescaled; /* utime scaled on frequency etc */ __u64 ac_stimescaled; /* stime scaled on frequency etc */ __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */ + + /* Delay waiting for memory reclaim */ + __u64 freepages_count; + __u64 freepages_delay_total; }; diff --git a/include/linux/tick.h b/include/linux/tick.h index a881c652f7e..d3c02695dc5 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -49,6 +49,7 @@ struct tick_sched { unsigned long check_clocks; enum tick_nohz_mode nohz_mode; ktime_t idle_tick; + int inidle; int tick_stopped; unsigned long idle_jiffies; unsigned long idle_calls; @@ -105,14 +106,14 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ -extern void tick_nohz_stop_sched_tick(void); +extern void tick_nohz_stop_sched_tick(int inidle); extern void tick_nohz_restart_sched_tick(void); extern void tick_nohz_update_jiffies(void); extern ktime_t tick_nohz_get_sleep_length(void); extern void tick_nohz_stop_idle(int cpu); 
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); # else -static inline void tick_nohz_stop_sched_tick(void) { } +static inline void tick_nohz_stop_sched_tick(int inidle) { } static inline void tick_nohz_restart_sched_tick(void) { } static inline void tick_nohz_update_jiffies(void) { } static inline ktime_t tick_nohz_get_sleep_length(void) diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index cf2b10d7573..86cb0501d3e 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -8,9 +8,15 @@ #ifndef _LINUX_TIMERFD_H #define _LINUX_TIMERFD_H +/* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/fcntl.h> +/* Flags for timerfd_settime. */ #define TFD_TIMER_ABSTIME (1 << 0) +/* Flags for timerfd_create. */ +#define TFD_CLOEXEC O_CLOEXEC +#define TFD_NONBLOCK O_NONBLOCK #endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h new file mode 100644 index 00000000000..b48d8196957 --- /dev/null +++ b/include/linux/tracehook.h @@ -0,0 +1,582 @@ +/* + * Tracing hooks + * + * Copyright (C) 2008 Red Hat, Inc. All rights reserved. + * + * This copyrighted material is made available to anyone wishing to use, + * modify, copy, or redistribute it subject to the terms and conditions + * of the GNU General Public License v.2. + * + * This file defines hook entry points called by core code where + * user tracing/debugging support might need to do something. These + * entry points are called tracehook_*(). Each hook declared below + * has a detailed kerneldoc comment giving the context (locking et + * al) from which it is called, and the meaning of its return value. + * + * Each function here typically has only one call site, so it is ok + * to have some nontrivial tracehook_*() inlines. In all cases, the + * fast path when no tracing is enabled should be very short. + * + * The purpose of this file and the tracehook_* layer is to consolidate + * the interface that the kernel core and arch code uses to enable any + * user debugging or tracing facility (such as ptrace). The interfaces + * here are carefully documented so that maintainers of core and arch + * code do not need to think about the implementation details of the + * tracing facilities. Likewise, maintainers of the tracing code do not + * need to understand all the calling core or arch code in detail, just + * documented circumstances of each call, such as locking conditions. + * + * If the calling core code changes so that locking is different, then + * it is ok to change the interface documented here. The maintainer of + * core code changing should notify the maintainers of the tracing code + * that they need to work out the change. + * + * Some tracehook_*() inlines take arguments that the current tracing + * implementations might not necessarily use. These function signatures + * are chosen to pass in all the information that is on hand in the + * caller and might conceivably be relevant to a tracer, so that the + * core code won't have to be updated when tracing adds more features. + * If a call site changes so that some of those parameters are no longer + * already on hand without extra work, then the tracehook_* interface + * can change so there is no make-work burden on the core code. The + * maintainer of core code changing should notify the maintainers of the + * tracing code that they need to work out the change. 
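/*
 * Userspace-side sketch of the new timerfd_create() flags defined above
 * (illustrative; assumes a C library that already wraps the syscall).
 * TFD_CLOEXEC and TFD_NONBLOCK alias O_CLOEXEC/O_NONBLOCK, so the
 * descriptor comes back close-on-exec and non-blocking with no separate
 * fcntl() step.
 */
#include <sys/timerfd.h>
#include <time.h>

int demo_make_timerfd(void)
{
	return timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
}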
+ */ + +#ifndef _LINUX_TRACEHOOK_H +#define _LINUX_TRACEHOOK_H 1 + +#include <linux/sched.h> +#include <linux/ptrace.h> +#include <linux/security.h> +struct linux_binprm; + +/** + * tracehook_expect_breakpoints - guess if task memory might be touched + * @task: current task, making a new mapping + * + * Return nonzero if @task is expected to want breakpoint insertion in + * its memory at some point. A zero return is no guarantee it won't + * be done, but this is a hint that it's known to be likely. + * + * May be called with @task->mm->mmap_sem held for writing. + */ +static inline int tracehook_expect_breakpoints(struct task_struct *task) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/* + * ptrace report for syscall entry and exit looks identical. + */ +static inline void ptrace_report_syscall(struct pt_regs *regs) +{ + int ptrace = task_ptrace(current); + + if (!(ptrace & PT_PTRACED)) + return; + + ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); + + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } +} + +/** + * tracehook_report_syscall_entry - task is about to attempt a system call + * @regs: user register state of current task + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just entered the kernel for a system call. + * Full user register state is available here. Changing the values + * in @regs can affect the system call number and arguments to be tried. + * It is safe to block here, preventing the system call from beginning. + * + * Returns zero normally, or nonzero if the calling arch code should abort + * the system call. That must prevent normal entry so no system call is + * made. If @task ever returns to user mode after this, its register state + * is unspecified, but should be something harmless like an %ENOSYS error + * return. It should preserve enough information so that syscall_rollback() + * can work (see asm-generic/syscall.h). + * + * Called without locks, just after entering kernel mode. + */ +static inline __must_check int tracehook_report_syscall_entry( + struct pt_regs *regs) +{ + ptrace_report_syscall(regs); + return 0; +} + +/** + * tracehook_report_syscall_exit - task has just finished a system call + * @regs: user register state of current task + * @step: nonzero if simulating single-step or block-step + * + * This will be called if %TIF_SYSCALL_TRACE has been set, when the + * current task has just finished an attempted system call. Full + * user register state is available here. It is safe to block here, + * preventing signals from being processed. + * + * If @step is nonzero, this report is also in lieu of the normal + * trap that would follow the system call instruction because + * user_enable_block_step() or user_enable_single_step() was used. + * In this case, %TIF_SYSCALL_TRACE might not be set. + * + * Called without locks, just before checking for pending signals. + */ +static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) +{ + ptrace_report_syscall(regs); +} + +/** + * tracehook_unsafe_exec - check for exec declared unsafe due to tracing + * @task: current task doing exec + * + * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. + * + * Called with task_lock() held on @task. 
+ */ +static inline int tracehook_unsafe_exec(struct task_struct *task) +{ + int unsafe = 0; + int ptrace = task_ptrace(task); + if (ptrace & PT_PTRACED) { + if (ptrace & PT_PTRACE_CAP) + unsafe |= LSM_UNSAFE_PTRACE_CAP; + else + unsafe |= LSM_UNSAFE_PTRACE; + } + return unsafe; +} + +/** + * tracehook_tracer_task - return the task that is tracing the given task + * @tsk: task to consider + * + * Returns NULL if noone is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() + * held on @task, still held from when tracehook_unsafe_exec() was called. + */ +static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) +{ + if (task_ptrace(tsk) & PT_PTRACED) + return rcu_dereference(tsk->parent); + return NULL; +} + +/** + * tracehook_report_exec - a successful exec was completed + * @fmt: &struct linux_binfmt that performed the exec + * @bprm: &struct linux_binprm containing exec details + * @regs: user-mode register state + * + * An exec just completed, we are shortly going to return to user mode. + * The freshly initialized register state can be seen and changed in @regs. + * The name, file and other pointers in @bprm are still on hand to be + * inspected, but will be freed as soon as this returns. + * + * Called with no locks, but with some kernel resources held live + * and a reference on @fmt->module. + */ +static inline void tracehook_report_exec(struct linux_binfmt *fmt, + struct linux_binprm *bprm, + struct pt_regs *regs) +{ + if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && + unlikely(task_ptrace(current) & PT_PTRACED)) + send_sig(SIGTRAP, current, 0); +} + +/** + * tracehook_report_exit - task has begun to exit + * @exit_code: pointer to value destined for @current->exit_code + * + * @exit_code points to the value passed to do_exit(), which tracing + * might change here. This is almost the first thing in do_exit(), + * before freeing any resources or setting the %PF_EXITING flag. + * + * Called with no locks held. + */ +static inline void tracehook_report_exit(long *exit_code) +{ + ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); +} + +/** + * tracehook_prepare_clone - prepare for new child to be cloned + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * + * This is called before a new user task is to be cloned. + * Its return value will be passed to tracehook_finish_clone(). + * + * Called with no locks held. + */ +static inline int tracehook_prepare_clone(unsigned clone_flags) +{ + if (clone_flags & CLONE_UNTRACED) + return 0; + + if (clone_flags & CLONE_VFORK) { + if (current->ptrace & PT_TRACE_VFORK) + return PTRACE_EVENT_VFORK; + } else if ((clone_flags & CSIGNAL) != SIGCHLD) { + if (current->ptrace & PT_TRACE_CLONE) + return PTRACE_EVENT_CLONE; + } else if (current->ptrace & PT_TRACE_FORK) + return PTRACE_EVENT_FORK; + + return 0; +} + +/** + * tracehook_finish_clone - new child created and being attached + * @child: new child task + * @clone_flags: %CLONE_* flags from clone/fork/vfork system call + * @trace: return value from tracehook_prepare_clone() + * + * This is called immediately after adding @child to its parent's children list. + * The @trace value is that returned by tracehook_prepare_clone(). + * + * Called with current's siglock and write_lock_irq(&tasklist_lock) held. 
+ */ +static inline void tracehook_finish_clone(struct task_struct *child, + unsigned long clone_flags, int trace) +{ + ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); +} + +/** + * tracehook_report_clone - in parent, new child is about to start running + * @trace: return value from tracehook_prepare_clone() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: new child task + * + * Called after a child is set up, but before it has been started + * running. @trace is the value returned by tracehook_prepare_clone(). + * This is not a good place to block, because the child has not started + * yet. Suspend the child here if desired, and then block in + * tracehook_report_clone_complete(). This must prevent the child from + * self-reaping if tracehook_report_clone_complete() uses the @child + * pointer; otherwise it might have died and been released by the time + * tracehook_report_report_clone_complete() is called. + * + * Called with no locks held, but the child cannot run until this returns. + */ +static inline void tracehook_report_clone(int trace, struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, struct task_struct *child) +{ + if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { + /* + * The child starts up with an immediate SIGSTOP. + */ + sigaddset(&child->pending.signal, SIGSTOP); + set_tsk_thread_flag(child, TIF_SIGPENDING); + } +} + +/** + * tracehook_report_clone_complete - new child is running + * @trace: return value from tracehook_prepare_clone() + * @regs: parent's user register state + * @clone_flags: flags from parent's system call + * @pid: new child's PID in the parent's namespace + * @child: child task, already running + * + * This is called just after the child has started running. This is + * just before the clone/fork syscall returns, or blocks for vfork + * child completion if @clone_flags has the %CLONE_VFORK bit set. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_clone_complete(int trace, + struct pt_regs *regs, + unsigned long clone_flags, + pid_t pid, + struct task_struct *child) +{ + if (unlikely(trace)) + ptrace_event(0, trace, pid); +} + +/** + * tracehook_report_vfork_done - vfork parent's child has exited or exec'd + * @child: child task, already running + * @pid: new child's PID in the parent's namespace + * + * Called after a %CLONE_VFORK parent has waited for the child to complete. + * The clone/vfork system call will return immediately after this. + * The @child pointer may be invalid if a self-reaping child died and + * tracehook_report_clone() took no action to prevent it from self-reaping. + * + * Called with no locks held. + */ +static inline void tracehook_report_vfork_done(struct task_struct *child, + pid_t pid) +{ + ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid); +} + +/** + * tracehook_prepare_release_task - task is being reaped, clean up tracing + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() just before @task gets finally reaped + * and freed. This would be the ideal place to remove and clean up any + * tracing-related state for @task. + * + * Called with no locks held. 
+ */ +static inline void tracehook_prepare_release_task(struct task_struct *task) +{ +} + +/** + * tracehook_finish_release_task - final tracing clean-up + * @task: task in %EXIT_DEAD state + * + * This is called in release_task() when @task is being in the middle of + * being reaped. After this, there must be no tracing entanglements. + * + * Called with write_lock_irq(&tasklist_lock) held. + */ +static inline void tracehook_finish_release_task(struct task_struct *task) +{ + ptrace_release_task(task); +} + +/** + * tracehook_signal_handler - signal handler setup is complete + * @sig: number of signal being delivered + * @info: siginfo_t of signal being delivered + * @ka: sigaction setting that chose the handler + * @regs: user register state + * @stepping: nonzero if debugger single-step or block-step in use + * + * Called by the arch code after a signal handler has been set up. + * Register and stack state reflects the user handler about to run. + * Signal mask changes have already been made. + * + * Called without locks, shortly before returning to user mode + * (or handling more signals). + */ +static inline void tracehook_signal_handler(int sig, siginfo_t *info, + const struct k_sigaction *ka, + struct pt_regs *regs, int stepping) +{ + if (stepping) + ptrace_notify(SIGTRAP); +} + +/** + * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_IGN or %SIG_DFL + * + * Return zero iff tracing doesn't care to examine this ignored signal, + * so it can short-circuit normal delivery and never even get queued. + * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. + * + * Called with @task->sighand->siglock held. + */ +static inline int tracehook_consider_ignored_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_consider_fatal_signal - suppress special handling of fatal signal + * @task: task receiving the signal + * @sig: signal number being sent + * @handler: %SIG_DFL or %SIG_IGN + * + * Return nonzero to prevent special handling of this termination signal. + * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, + * in which case force_sig() is about to reset it to %SIG_DFL. + * When this returns zero, this signal might cause a quick termination + * that does not give the debugger a chance to intercept the signal. + * + * Called with or without @task->sighand->siglock held. + */ +static inline int tracehook_consider_fatal_signal(struct task_struct *task, + int sig, + void __user *handler) +{ + return (task_ptrace(task) & PT_PTRACED) != 0; +} + +/** + * tracehook_force_sigpending - let tracing force signal_pending(current) on + * + * Called when recomputing our signal_pending() flag. Return nonzero + * to force the signal_pending() flag on, so that tracehook_get_signal() + * will be called before the next return to user mode. + * + * Called with @current->sighand->siglock held. + */ +static inline int tracehook_force_sigpending(void) +{ + return 0; +} + +/** + * tracehook_get_signal - deliver synthetic signal to traced task + * @task: @current + * @regs: task_pt_regs(@current) + * @info: details of synthetic signal + * @return_ka: sigaction for synthetic signal + * + * Return zero to check for a real pending signal normally. + * Return -1 after releasing the siglock to repeat the check. 
+ * Return a signal number to induce an artifical signal delivery, + * setting *@info and *@return_ka to specify its details and behavior. + * + * The @return_ka->sa_handler value controls the disposition of the + * signal, no matter the signal number. For %SIG_DFL, the return value + * is a representative signal to indicate the behavior (e.g. %SIGTERM + * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop, + * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number + * reported will be @info->si_signo instead. + * + * Called with @task->sighand->siglock held, before dequeuing pending signals. + */ +static inline int tracehook_get_signal(struct task_struct *task, + struct pt_regs *regs, + siginfo_t *info, + struct k_sigaction *return_ka) +{ + return 0; +} + +/** + * tracehook_notify_jctl - report about job control stop/continue + * @notify: nonzero if this is the last thread in the group to stop + * @why: %CLD_STOPPED or %CLD_CONTINUED + * + * This is called when we might call do_notify_parent_cldstop(). + * It's called when about to stop for job control; we are already in + * %TASK_STOPPED state, about to call schedule(). It's also called when + * a delayed %CLD_STOPPED or %CLD_CONTINUED report is ready to be made. + * + * Return nonzero to generate a %SIGCHLD with @why, which is + * normal if @notify is nonzero. + * + * Called with no locks held. + */ +static inline int tracehook_notify_jctl(int notify, int why) +{ + return notify || (current->ptrace & PT_PTRACED); +} + +#define DEATH_REAP -1 +#define DEATH_DELAYED_GROUP_LEADER -2 + +/** + * tracehook_notify_death - task is dead, ready to notify parent + * @task: @current task now exiting + * @death_cookie: value to pass to tracehook_report_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * A return value >= 0 means call do_notify_parent() with that signal + * number. Negative return value can be %DEATH_REAP to self-reap right + * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our + * parent. Note that a return value of 0 means a do_notify_parent() call + * that sends no signal, but still wakes up a parent blocked in wait*(). + * + * Called with write_lock_irq(&tasklist_lock) held. + */ +static inline int tracehook_notify_death(struct task_struct *task, + void **death_cookie, int group_dead) +{ + if (task->exit_signal == -1) + return task->ptrace ? SIGCHLD : DEATH_REAP; + + /* + * If something other than our normal parent is ptracing us, then + * send it a SIGCHLD instead of honoring exit_signal. exit_signal + * only has special meaning to our real parent. + */ + if (thread_group_empty(task) && !ptrace_reparented(task)) + return task->exit_signal; + + return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; +} + +/** + * tracehook_report_death - task is dead and ready to be reaped + * @task: @current task now exiting + * @signal: return value from tracheook_notify_death() + * @death_cookie: value passed back from tracehook_notify_death() + * @group_dead: nonzero if this was the last thread in the group to die + * + * Thread has just become a zombie or is about to self-reap. If positive, + * @signal is the signal number just sent to the parent (usually %SIGCHLD). + * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is + * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. + * The @death_cookie was passed back by tracehook_notify_death(). 
+ * + * If normal reaping is not inhibited, @task->exit_state might be changing + * in parallel. + * + * Called without locks. + */ +static inline void tracehook_report_death(struct task_struct *task, + int signal, void *death_cookie, + int group_dead) +{ +} + +#ifdef TIF_NOTIFY_RESUME +/** + * set_notify_resume - cause tracehook_notify_resume() to be called + * @task: task that will call tracehook_notify_resume() + * + * Calling this arranges that @task will call tracehook_notify_resume() + * before returning to user mode. If it's already running in user mode, + * it will enter the kernel and call tracehook_notify_resume() soon. + * If it's blocked, it will not be woken. + */ +static inline void set_notify_resume(struct task_struct *task) +{ + if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) + kick_process(task); +} + +/** + * tracehook_notify_resume - report when about to return to user mode + * @regs: user-mode registers of @current task + * + * This is called when %TIF_NOTIFY_RESUME has been set. Now we are + * about to return to user mode, and the user state in @regs can be + * inspected or adjusted. The caller in arch code has cleared + * %TIF_NOTIFY_RESUME before the call. If the flag gets set again + * asynchronously, this will be called again before we return to + * user mode. + * + * Called without locks. + */ +static inline void tracehook_notify_resume(struct pt_regs *regs) +{ +} +#endif /* TIF_NOTIFY_RESUME */ + +#endif /* <linux/tracehook.h> */ diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h new file mode 100644 index 00000000000..eb5b74a575b --- /dev/null +++ b/include/linux/typecheck.h @@ -0,0 +1,24 @@ +#ifndef TYPECHECK_H_INCLUDED +#define TYPECHECK_H_INCLUDED + +/* + * Check at compile time that something is of a particular type. + * Always evaluates to 1 so you may use it easily in comparisons. + */ +#define typecheck(type,x) \ +({ type __dummy; \ + typeof(x) __dummy2; \ + (void)(&__dummy == &__dummy2); \ + 1; \ +}) + +/* + * Check at compile time that 'function' is a certain type, or is a pointer + * to that type (needs to use typedef for the function type.) + */ +#define typecheck_fn(type,function) \ +({ typeof(type) __tmp = function; \ + (void)__tmp; \ +}) + +#endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/usb.h b/include/linux/usb.h index 5811c5da69f..0924cd9c30f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -110,6 +110,8 @@ enum usb_interface_condition { * @sysfs_files_created: sysfs attributes exist * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. + * @needs_binding: flag set when the driver should be re-probed or unbound + * following a reset or suspend operation it doesn't support. * @dev: driver model's view of this device * @usb_dev: if an interface is bound to the USB major, this will point * to the sysfs representation for that device. diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 747c3a49cdc..c932390c6da 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -330,7 +330,7 @@ extern int usb_string_id(struct usb_composite_dev *c); dev_vdbg(&(d)->gadget->dev , fmt , ## args) #define ERROR(d, fmt, args...) \ dev_err(&(d)->gadget->dev , fmt , ## args) -#define WARN(d, fmt, args...) \ +#define WARNING(d, fmt, args...) \ dev_warn(&(d)->gadget->dev , fmt , ## args) #define INFO(d, fmt, args...) 
\ dev_info(&(d)->gadget->dev , fmt , ## args) diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 00000000000..630962c04ca --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,98 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb_hdrc". It encapsulates + * key configuration differences between boards. + */ + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) + */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1; /* supports dynamic fifo sizing */ + unsigned soft_con:1; /* soft connect required */ + unsigned utm_16:1; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1; /* supports DMA */ + unsigned vendor_req:1; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl; /* vendor control reg width */ + u8 vendor_stat; /* vendor status reg witdh */ + u8 dma_req_chan; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits; +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* Turn device clock on or off */ + int (*set_clock)(struct clk *clock, int is_on); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2 + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 09a3e6a7518..655341d0f53 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,8 @@ #include <linux/mutex.h> #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ 
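For illustration only (not part of this patch): the typecheck() macro introduced in the new <linux/typecheck.h> above always evaluates to 1, but provokes a compile-time warning (an error under -Werror) when the expression is not of the named type; the jiffies time_after() helpers use it the same way. A minimal sketch, with an invented deadline variable:

#include <linux/typecheck.h>

static unsigned long event_deadline;	/* hypothetical example state */

static int event_deadline_passed(unsigned long now)
{
	/* both typecheck() uses evaluate to 1; they exist only so the
	 * build complains if either operand is not an unsigned long */
	return typecheck(unsigned long, now) &&
	       typecheck(unsigned long, event_deadline) &&
	       (long)(now - event_deadline) >= 0;
}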
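Also for illustration (not part of this patch): the comment at the top of the new <linux/usb/musb.h> above says board code should hang a musb_hdrc_platform_data off a platform_device named "musb_hdrc". A hypothetical board file might do so roughly as follows; the clock name, power budgets and FIFO configuration are invented example values, not driver defaults:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/usb/musb.h>

static struct musb_hdrc_config board_musb_config = {
	.multipoint	= 1,
	.dyn_fifo	= 1,
	.num_eps	= 16,			/* including ep0 */
	.ram_bits	= 12,
};

static struct musb_hdrc_platform_data board_musb_data = {
	.mode		= MUSB_OTG,		/* Mini-AB connector */
	.clock		= "usbhs_ick",		/* example name passed to clk_get() */
	.power		= 250,			/* 500 mA VBUS budget, in units of 2 mA */
	.min_power	= 50,			/* draw at most 100 mA as a peripheral */
	.config		= &board_musb_config,
};

static struct platform_device board_musb_device = {
	.name	= "musb_hdrc",
	.id	= -1,
	.dev	= {
		.platform_data = &board_musb_data,
	},
};

static int __init board_musb_init(void)
{
	return platform_device_register(&board_musb_device);
}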
-#define SERIAL_TTY_MINORS 255 /* loads of devices :) */ +#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ +#define SERIAL_TTY_NO_MINOR 255 /* No minor was assigned */ /* The maximum number of ports one device can grab at once */ #define MAX_NUM_PORTS 8 diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 9385a566aed..15a653d4113 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -17,6 +17,21 @@ #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) +#define VID_TYPE_CAPTURE 1 /* Can capture */ +#define VID_TYPE_TUNER 2 /* Can tune */ +#define VID_TYPE_TELETEXT 4 /* Does teletext */ +#define VID_TYPE_OVERLAY 8 /* Overlay onto frame buffer */ +#define VID_TYPE_CHROMAKEY 16 /* Overlay by chromakey */ +#define VID_TYPE_CLIPPING 32 /* Can clip */ +#define VID_TYPE_FRAMERAM 64 /* Uses the frame buffer memory */ +#define VID_TYPE_SCALES 128 /* Scalable */ +#define VID_TYPE_MONOCHROME 256 /* Monochrome only */ +#define VID_TYPE_SUBCAPTURE 512 /* Can capture subareas of the image */ +#define VID_TYPE_MPEG_DECODER 1024 /* Can decode MPEG streams */ +#define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ +#define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ +#define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ + struct video_capability { char name[32]; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 2e66a95e8d3..e466bd54a50 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -71,6 +71,11 @@ */ #define VIDEO_MAX_FRAME 32 +#ifndef __KERNEL__ + +/* These defines are V4L1 specific and should not be used with the V4L2 API! + They will be removed from this header in the future. */ + #define VID_TYPE_CAPTURE 1 /* Can capture */ #define VID_TYPE_TUNER 2 /* Can tune */ #define VID_TYPE_TELETEXT 4 /* Does teletext */ @@ -85,14 +90,15 @@ #define VID_TYPE_MPEG_ENCODER 2048 /* Can encode MPEG streams */ #define VID_TYPE_MJPEG_DECODER 4096 /* Can decode MJPEG streams */ #define VID_TYPE_MJPEG_ENCODER 8192 /* Can encode MJPEG streams */ +#endif /* * M I S C E L L A N E O U S */ /* Four-character-code (FOURCC) */ -#define v4l2_fourcc(a,b,c,d)\ - (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24)) +#define v4l2_fourcc(a, b, c, d)\ + ((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24)) /* * E N U M S @@ -226,8 +232,7 @@ struct v4l2_fract { /* * D R I V E R C A P A B I L I T I E S */ -struct v4l2_capability -{ +struct v4l2_capability { __u8 driver[16]; /* i.e. "bttv" */ __u8 card[32]; /* i.e. 
"Hauppauge WinTV" */ __u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */ @@ -259,8 +264,7 @@ struct v4l2_capability /* * V I D E O I M A G E F O R M A T */ -struct v4l2_pix_format -{ +struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; @@ -272,68 +276,69 @@ struct v4l2_pix_format }; /* Pixel format FOURCC depth Description */ -#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R','G','B','1') /* 8 RGB-3-3-2 */ -#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R','4','4','4') /* 16 xxxxrrrr ggggbbbb */ -#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R','G','B','O') /* 16 RGB-5-5-5 */ -#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R','G','B','P') /* 16 RGB-5-6-5 */ -#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R','G','B','Q') /* 16 RGB-5-5-5 BE */ -#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R','G','B','R') /* 16 RGB-5-6-5 BE */ -#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B','G','R','3') /* 24 BGR-8-8-8 */ -#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R','G','B','3') /* 24 RGB-8-8-8 */ -#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B','G','R','4') /* 32 BGR-8-8-8-8 */ -#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R','G','B','4') /* 32 RGB-8-8-8-8 */ -#define V4L2_PIX_FMT_GREY v4l2_fourcc('G','R','E','Y') /* 8 Greyscale */ -#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y','1','6',' ') /* 16 Greyscale */ -#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P','A','L','8') /* 8 8-bit palette */ -#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y','V','U','9') /* 9 YVU 4:1:0 */ -#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y','V','1','2') /* 12 YVU 4:2:0 */ -#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y','U','Y','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U','Y','V','Y') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4','2','2','P') /* 16 YVU422 planar */ -#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4','1','1','P') /* 16 YVU411 planar */ -#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y','4','1','P') /* 12 YUV 4:1:1 */ -#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y','4','4','4') /* 16 xxxxyyyy uuuuvvvv */ -#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y','U','V','O') /* 16 YUV-5-5-5 */ -#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y','U','V','P') /* 16 YUV-5-6-5 */ -#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y','U','V','4') /* 32 YUV-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */ +#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */ +#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ +#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ +#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ +#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ +#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ +#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ +#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ +#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */ +#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ +#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ +#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */ +#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */ +#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ +#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 
'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ +#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ +#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ +#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */ +#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */ +#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */ +#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */ /* two planes -- one Y, one Cr + Cb interleaved */ -#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N','V','1','2') /* 12 Y/CbCr 4:2:0 */ -#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N','V','2','1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ +#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ /* The following formats are not defined in the V4L2 specification */ -#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y','U','V','9') /* 9 YUV 4:1:0 */ -#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y','U','1','2') /* 12 YUV 4:2:0 */ -#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y','Y','U','V') /* 16 YUV 4:2:2 */ -#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H','I','2','4') /* 8 8-bit color */ -#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H','M','1','2') /* 8 YUV 4:2:0 16x16 macroblocks */ +#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ +#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */ +#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* 8 8-bit color */ +#define V4L2_PIX_FMT_HM12 v4l2_fourcc('H', 'M', '1', '2') /* 8 YUV 4:2:0 16x16 macroblocks */ /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ -#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */ -#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */ -#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ +#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ -#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M','J','P','G') /* Motion-JPEG */ -#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J','P','E','G') /* JFIF JPEG */ -#define V4L2_PIX_FMT_DV v4l2_fourcc('d','v','s','d') /* 1394 */ -#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M','P','E','G') /* MPEG-1/2/4 */ +#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ +#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */ +#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */ +#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 */ /* Vendor-specific formats */ -#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W','N','V','A') /* Winnov hw compress */ -#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S','9','1','0') /* SN9C10x compression */ -#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */ -#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */ -#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */ -#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */ -#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */ -#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ +#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ +#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ +#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ +#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */ +#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */ +#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ +#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ +#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ /* * F O R M A T E N U M E R A T I O N */ -struct v4l2_fmtdesc -{ +struct v4l2_fmtdesc { __u32 index; /* Format number */ enum v4l2_buf_type type; /* buffer type */ __u32 flags; @@ -349,21 +354,18 @@ struct v4l2_fmtdesc /* * F R A M E S I Z E E N U M E R A T I O N */ -enum v4l2_frmsizetypes -{ +enum v4l2_frmsizetypes { V4L2_FRMSIZE_TYPE_DISCRETE = 1, V4L2_FRMSIZE_TYPE_CONTINUOUS = 2, V4L2_FRMSIZE_TYPE_STEPWISE = 3, }; -struct v4l2_frmsize_discrete -{ +struct v4l2_frmsize_discrete { __u32 width; /* Frame width [pixel] */ __u32 height; /* Frame height [pixel] */ }; -struct v4l2_frmsize_stepwise -{ +struct v4l2_frmsize_stepwise { __u32 min_width; /* Minimum frame width [pixel] */ __u32 max_width; /* Maximum frame width [pixel] */ __u32 step_width; /* Frame width step size [pixel] */ @@ -372,8 +374,7 @@ struct v4l2_frmsize_stepwise __u32 step_height; /* Frame height step size [pixel] */ }; -struct v4l2_frmsizeenum -{ +struct v4l2_frmsizeenum { __u32 index; /* Frame size number */ __u32 pixel_format; /* Pixel format */ __u32 type; /* Frame size type the device supports. 
*/ @@ -389,22 +390,19 @@ struct v4l2_frmsizeenum /* * F R A M E R A T E E N U M E R A T I O N */ -enum v4l2_frmivaltypes -{ +enum v4l2_frmivaltypes { V4L2_FRMIVAL_TYPE_DISCRETE = 1, V4L2_FRMIVAL_TYPE_CONTINUOUS = 2, V4L2_FRMIVAL_TYPE_STEPWISE = 3, }; -struct v4l2_frmival_stepwise -{ +struct v4l2_frmival_stepwise { struct v4l2_fract min; /* Minimum frame interval [s] */ struct v4l2_fract max; /* Maximum frame interval [s] */ struct v4l2_fract step; /* Frame interval step size [s] */ }; -struct v4l2_frmivalenum -{ +struct v4l2_frmivalenum { __u32 index; /* Frame format index */ __u32 pixel_format; /* Pixel format */ __u32 width; /* Frame width */ @@ -423,8 +421,7 @@ struct v4l2_frmivalenum /* * T I M E C O D E */ -struct v4l2_timecode -{ +struct v4l2_timecode { __u32 type; __u32 flags; __u8 frames; @@ -449,8 +446,7 @@ struct v4l2_timecode #define V4L2_TC_USERBITS_8BITCHARS 0x0008 /* The above is based on SMPTE timecodes */ -struct v4l2_jpegcompression -{ +struct v4l2_jpegcompression { int quality; int APPn; /* Number of APP segment to be written, @@ -482,16 +478,14 @@ struct v4l2_jpegcompression /* * M E M O R Y - M A P P I N G B U F F E R S */ -struct v4l2_requestbuffers -{ +struct v4l2_requestbuffers { __u32 count; enum v4l2_buf_type type; enum v4l2_memory memory; __u32 reserved[2]; }; -struct v4l2_buffer -{ +struct v4l2_buffer { __u32 index; enum v4l2_buf_type type; __u32 bytesused; @@ -525,13 +519,12 @@ struct v4l2_buffer /* * O V E R L A Y P R E V I E W */ -struct v4l2_framebuffer -{ +struct v4l2_framebuffer { __u32 capability; __u32 flags; /* FIXME: in theory we should pass something like PCI device + memory * region + offset instead of some physical address */ - void* base; + void *base; struct v4l2_pix_format fmt; }; /* Flags for the 'capability' field. 
Read only */ @@ -550,14 +543,12 @@ struct v4l2_framebuffer #define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010 #define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020 -struct v4l2_clip -{ +struct v4l2_clip { struct v4l2_rect c; struct v4l2_clip __user *next; }; -struct v4l2_window -{ +struct v4l2_window { struct v4l2_rect w; enum v4l2_field field; __u32 chromakey; @@ -570,8 +561,7 @@ struct v4l2_window /* * C A P T U R E P A R A M E T E R S */ -struct v4l2_captureparm -{ +struct v4l2_captureparm { __u32 capability; /* Supported modes */ __u32 capturemode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in .1us units */ @@ -584,8 +574,7 @@ struct v4l2_captureparm #define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */ #define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */ -struct v4l2_outputparm -{ +struct v4l2_outputparm { __u32 capability; /* Supported modes */ __u32 outputmode; /* Current mode */ struct v4l2_fract timeperframe; /* Time per frame in seconds */ @@ -702,8 +691,7 @@ typedef __u64 v4l2_std_id; #define V4L2_STD_ALL (V4L2_STD_525_60 |\ V4L2_STD_625_50) -struct v4l2_standard -{ +struct v4l2_standard { __u32 index; v4l2_std_id id; __u8 name[24]; @@ -715,8 +703,7 @@ struct v4l2_standard /* * V I D E O I N P U T S */ -struct v4l2_input -{ +struct v4l2_input { __u32 index; /* Which input */ __u8 name[32]; /* Label */ __u32 type; /* Type of input */ @@ -753,8 +740,7 @@ struct v4l2_input /* * V I D E O O U T P U T S */ -struct v4l2_output -{ +struct v4l2_output { __u32 index; /* Which output */ __u8 name[32]; /* Label */ __u32 type; /* Type of output */ @@ -771,14 +757,12 @@ struct v4l2_output /* * C O N T R O L S */ -struct v4l2_control -{ +struct v4l2_control { __u32 id; __s32 value; }; -struct v4l2_ext_control -{ +struct v4l2_ext_control { __u32 id; __u32 reserved2[2]; union { @@ -788,8 +772,7 @@ struct v4l2_ext_control }; } __attribute__ ((packed)); -struct v4l2_ext_controls -{ +struct v4l2_ext_controls { __u32 ctrl_class; __u32 count; __u32 error_idx; @@ -807,8 +790,7 @@ struct v4l2_ext_controls #define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000) /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ -struct v4l2_queryctrl -{ +struct v4l2_queryctrl { __u32 id; enum v4l2_ctrl_type type; __u8 name[32]; /* Whatever */ @@ -821,8 +803,7 @@ struct v4l2_queryctrl }; /* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */ -struct v4l2_querymenu -{ +struct v4l2_querymenu { __u32 id; __u32 index; __u8 name[32]; /* Whatever */ @@ -1104,8 +1085,7 @@ enum v4l2_exposure_auto_type { /* * T U N I N G */ -struct v4l2_tuner -{ +struct v4l2_tuner { __u32 index; __u8 name[32]; enum v4l2_tuner_type type; @@ -1119,8 +1099,7 @@ struct v4l2_tuner __u32 reserved[4]; }; -struct v4l2_modulator -{ +struct v4l2_modulator { __u32 index; __u8 name[32]; __u32 capability; @@ -1153,8 +1132,7 @@ struct v4l2_modulator #define V4L2_TUNER_MODE_LANG1 0x0003 #define V4L2_TUNER_MODE_LANG1_LANG2 0x0004 -struct v4l2_frequency -{ +struct v4l2_frequency { __u32 tuner; enum v4l2_tuner_type type; __u32 frequency; @@ -1172,8 +1150,7 @@ struct v4l2_hw_freq_seek { /* * A U D I O */ -struct v4l2_audio -{ +struct v4l2_audio { __u32 index; __u8 name[32]; __u32 capability; @@ -1188,8 +1165,7 @@ struct v4l2_audio /* Flags for the 'mode' field */ #define V4L2_AUDMODE_AVL 0x00001 -struct v4l2_audioout -{ +struct v4l2_audioout { __u32 index; __u8 name[32]; __u32 capability; @@ -1253,8 +1229,7 @@ struct v4l2_encoder_cmd { */ /* Raw VBI */ -struct v4l2_vbi_format -{ +struct 
v4l2_vbi_format { __u32 sampling_rate; /* in 1 Hz */ __u32 offset; __u32 samples_per_line; @@ -1266,8 +1241,8 @@ struct v4l2_vbi_format }; /* VBI flags */ -#define V4L2_VBI_UNSYNC (1<< 0) -#define V4L2_VBI_INTERLACED (1<< 1) +#define V4L2_VBI_UNSYNC (1 << 0) +#define V4L2_VBI_INTERLACED (1 << 1) /* Sliced VBI * @@ -1276,8 +1251,7 @@ struct v4l2_vbi_format * notice in the definitive implementation. */ -struct v4l2_sliced_vbi_format -{ +struct v4l2_sliced_vbi_format { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1301,8 +1275,7 @@ struct v4l2_sliced_vbi_format #define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525) #define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625) -struct v4l2_sliced_vbi_cap -{ +struct v4l2_sliced_vbi_cap { __u16 service_set; /* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field @@ -1313,8 +1286,7 @@ struct v4l2_sliced_vbi_cap __u32 reserved[3]; /* must be 0 */ }; -struct v4l2_sliced_vbi_data -{ +struct v4l2_sliced_vbi_data { __u32 id; __u32 field; /* 0: first field, 1: second field */ __u32 line; /* 1-23 */ @@ -1328,27 +1300,23 @@ struct v4l2_sliced_vbi_data /* Stream data format */ -struct v4l2_format -{ +struct v4l2_format { enum v4l2_buf_type type; - union - { - struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE - struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY - struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE - struct v4l2_sliced_vbi_format sliced; // V4L2_BUF_TYPE_SLICED_VBI_CAPTURE - __u8 raw_data[200]; // user-defined + union { + struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */ + struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */ + struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */ + struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ + __u8 raw_data[200]; /* user-defined */ } fmt; }; /* Stream type-dependent parameters */ -struct v4l2_streamparm -{ +struct v4l2_streamparm { enum v4l2_buf_type type; - union - { + union { struct v4l2_captureparm capture; struct v4l2_outputparm output; __u8 raw_data[200]; /* user-defined */ @@ -1386,92 +1354,86 @@ struct v4l2_chip_ident { * I O C T L C O D E S F O R V I D E O D E V I C E S * */ -#define VIDIOC_QUERYCAP _IOR ('V', 0, struct v4l2_capability) -#define VIDIOC_RESERVED _IO ('V', 1) -#define VIDIOC_ENUM_FMT _IOWR ('V', 2, struct v4l2_fmtdesc) -#define VIDIOC_G_FMT _IOWR ('V', 4, struct v4l2_format) -#define VIDIOC_S_FMT _IOWR ('V', 5, struct v4l2_format) -#define VIDIOC_REQBUFS _IOWR ('V', 8, struct v4l2_requestbuffers) -#define VIDIOC_QUERYBUF _IOWR ('V', 9, struct v4l2_buffer) -#define VIDIOC_G_FBUF _IOR ('V', 10, struct v4l2_framebuffer) -#define VIDIOC_S_FBUF _IOW ('V', 11, struct v4l2_framebuffer) -#define VIDIOC_OVERLAY _IOW ('V', 14, int) -#define VIDIOC_QBUF _IOWR ('V', 15, struct v4l2_buffer) -#define VIDIOC_DQBUF _IOWR ('V', 17, struct v4l2_buffer) -#define VIDIOC_STREAMON _IOW ('V', 18, int) -#define VIDIOC_STREAMOFF _IOW ('V', 19, int) -#define VIDIOC_G_PARM _IOWR ('V', 21, struct v4l2_streamparm) -#define VIDIOC_S_PARM _IOWR ('V', 22, struct v4l2_streamparm) -#define VIDIOC_G_STD _IOR ('V', 23, v4l2_std_id) -#define VIDIOC_S_STD _IOW ('V', 24, v4l2_std_id) -#define VIDIOC_ENUMSTD _IOWR ('V', 25, struct v4l2_standard) -#define VIDIOC_ENUMINPUT _IOWR ('V', 26, struct 
v4l2_input) -#define VIDIOC_G_CTRL _IOWR ('V', 27, struct v4l2_control) -#define VIDIOC_S_CTRL _IOWR ('V', 28, struct v4l2_control) -#define VIDIOC_G_TUNER _IOWR ('V', 29, struct v4l2_tuner) -#define VIDIOC_S_TUNER _IOW ('V', 30, struct v4l2_tuner) -#define VIDIOC_G_AUDIO _IOR ('V', 33, struct v4l2_audio) -#define VIDIOC_S_AUDIO _IOW ('V', 34, struct v4l2_audio) -#define VIDIOC_QUERYCTRL _IOWR ('V', 36, struct v4l2_queryctrl) -#define VIDIOC_QUERYMENU _IOWR ('V', 37, struct v4l2_querymenu) -#define VIDIOC_G_INPUT _IOR ('V', 38, int) -#define VIDIOC_S_INPUT _IOWR ('V', 39, int) -#define VIDIOC_G_OUTPUT _IOR ('V', 46, int) -#define VIDIOC_S_OUTPUT _IOWR ('V', 47, int) -#define VIDIOC_ENUMOUTPUT _IOWR ('V', 48, struct v4l2_output) -#define VIDIOC_G_AUDOUT _IOR ('V', 49, struct v4l2_audioout) -#define VIDIOC_S_AUDOUT _IOW ('V', 50, struct v4l2_audioout) -#define VIDIOC_G_MODULATOR _IOWR ('V', 54, struct v4l2_modulator) -#define VIDIOC_S_MODULATOR _IOW ('V', 55, struct v4l2_modulator) -#define VIDIOC_G_FREQUENCY _IOWR ('V', 56, struct v4l2_frequency) -#define VIDIOC_S_FREQUENCY _IOW ('V', 57, struct v4l2_frequency) -#define VIDIOC_CROPCAP _IOWR ('V', 58, struct v4l2_cropcap) -#define VIDIOC_G_CROP _IOWR ('V', 59, struct v4l2_crop) -#define VIDIOC_S_CROP _IOW ('V', 60, struct v4l2_crop) -#define VIDIOC_G_JPEGCOMP _IOR ('V', 61, struct v4l2_jpegcompression) -#define VIDIOC_S_JPEGCOMP _IOW ('V', 62, struct v4l2_jpegcompression) -#define VIDIOC_QUERYSTD _IOR ('V', 63, v4l2_std_id) -#define VIDIOC_TRY_FMT _IOWR ('V', 64, struct v4l2_format) -#define VIDIOC_ENUMAUDIO _IOWR ('V', 65, struct v4l2_audio) -#define VIDIOC_ENUMAUDOUT _IOWR ('V', 66, struct v4l2_audioout) -#define VIDIOC_G_PRIORITY _IOR ('V', 67, enum v4l2_priority) -#define VIDIOC_S_PRIORITY _IOW ('V', 68, enum v4l2_priority) -#define VIDIOC_G_SLICED_VBI_CAP _IOWR ('V', 69, struct v4l2_sliced_vbi_cap) -#define VIDIOC_LOG_STATUS _IO ('V', 70) -#define VIDIOC_G_EXT_CTRLS _IOWR ('V', 71, struct v4l2_ext_controls) -#define VIDIOC_S_EXT_CTRLS _IOWR ('V', 72, struct v4l2_ext_controls) -#define VIDIOC_TRY_EXT_CTRLS _IOWR ('V', 73, struct v4l2_ext_controls) +#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability) +#define VIDIOC_RESERVED _IO('V', 1) +#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc) +#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format) +#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format) +#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers) +#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer) +#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer) +#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer) +#define VIDIOC_OVERLAY _IOW('V', 14, int) +#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer) +#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer) +#define VIDIOC_STREAMON _IOW('V', 18, int) +#define VIDIOC_STREAMOFF _IOW('V', 19, int) +#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm) +#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm) +#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id) +#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id) +#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard) +#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input) +#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control) +#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control) +#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner) +#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner) +#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio) +#define 
VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio) +#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl) +#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu) +#define VIDIOC_G_INPUT _IOR('V', 38, int) +#define VIDIOC_S_INPUT _IOWR('V', 39, int) +#define VIDIOC_G_OUTPUT _IOR('V', 46, int) +#define VIDIOC_S_OUTPUT _IOWR('V', 47, int) +#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output) +#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout) +#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout) +#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator) +#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator) +#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency) +#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency) +#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap) +#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop) +#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) +#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) +#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) +#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) +#define VIDIOC_G_PRIORITY _IOR('V', 67, enum v4l2_priority) +#define VIDIOC_S_PRIORITY _IOW('V', 68, enum v4l2_priority) +#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap) +#define VIDIOC_LOG_STATUS _IO('V', 70) +#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls) +#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls) +#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls) #if 1 -#define VIDIOC_ENUM_FRAMESIZES _IOWR ('V', 74, struct v4l2_frmsizeenum) -#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR ('V', 75, struct v4l2_frmivalenum) -#define VIDIOC_G_ENC_INDEX _IOR ('V', 76, struct v4l2_enc_idx) -#define VIDIOC_ENCODER_CMD _IOWR ('V', 77, struct v4l2_encoder_cmd) -#define VIDIOC_TRY_ENCODER_CMD _IOWR ('V', 78, struct v4l2_encoder_cmd) +#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum) +#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum) +#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) +#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) +#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) /* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ -#define VIDIOC_DBG_S_REGISTER _IOW ('V', 79, struct v4l2_register) -#define VIDIOC_DBG_G_REGISTER _IOWR ('V', 80, struct v4l2_register) +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register) -#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident) +#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) #endif -#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek) +#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ -#define VIDIOC_OVERLAY_OLD _IOWR ('V', 14, int) -#define VIDIOC_S_PARM_OLD _IOW ('V', 22, struct v4l2_streamparm) -#define VIDIOC_S_CTRL_OLD _IOW ('V', 28, struct v4l2_control) -#define VIDIOC_G_AUDIO_OLD _IOWR ('V', 33, struct v4l2_audio) -#define VIDIOC_G_AUDOUT_OLD _IOWR ('V', 49, struct v4l2_audioout) -#define VIDIOC_CROPCAP_OLD _IOR ('V', 
58, struct v4l2_cropcap) +#define VIDIOC_OVERLAY_OLD _IOWR('V', 14, int) +#define VIDIOC_S_PARM_OLD _IOW('V', 22, struct v4l2_streamparm) +#define VIDIOC_S_CTRL_OLD _IOW('V', 28, struct v4l2_control) +#define VIDIOC_G_AUDIO_OLD _IOWR('V', 33, struct v4l2_audio) +#define VIDIOC_G_AUDOUT_OLD _IOWR('V', 49, struct v4l2_audioout) +#define VIDIOC_CROPCAP_OLD _IOR('V', 58, struct v4l2_cropcap) #endif #define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */ #endif /* __LINUX_VIDEODEV2_H */ - -/* - * Local variables: - * c-basic-offset: 8 - * End: - */ diff --git a/include/linux/videotext.h b/include/linux/videotext.h index 018f92047ff..3e68c8d1c7f 100644 --- a/include/linux/videotext.h +++ b/include/linux/videotext.h @@ -45,10 +45,10 @@ #define VTXIOCCLRCACHE_OLD 0x710b /* clear cache on VTX-interface (if avail.) */ #define VTXIOCSETVIRT_OLD 0x710c /* turn on virtual mode (this disables TV-display) */ -/* +/* * Definitions for VTXIOCGETINFO */ - + #define SAA5243 0 #define SAA5246 1 #define SAA5249 2 @@ -57,10 +57,10 @@ typedef struct { int version_major, version_minor; /* version of driver; if version_major changes, driver */ - /* is not backward compatible!!! CHECK THIS!!! */ + /* is not backward compatible!!! CHECK THIS!!! */ int numpages; /* number of page-buffers of vtx-chipset */ int cct_type; /* type of vtx-chipset (SAA5243, SAA5246, SAA5248 or - * SAA5249) */ + * SAA5249) */ } vtx_info_t; @@ -81,7 +81,7 @@ vtx_info_t; #define PGMASK_HOUR (HR_TEN | HR_UNIT) #define PGMASK_MINUTE (MIN_TEN | MIN_UNIT) -typedef struct +typedef struct { int page; /* number of requested page (hexadecimal) */ int hour; /* requested hour (hexadecimal) */ @@ -98,11 +98,11 @@ vtx_pagereq_t; /* * Definitions for VTXIOC{GETSTAT,PUTSTAT} */ - + #define VTX_PAGESIZE (40 * 24) #define VTX_VIRTUALSIZE (40 * 49) -typedef struct +typedef struct { int pagenum; /* number of page (hexadecimal) */ int hour; /* hour (hexadecimal) */ @@ -121,5 +121,5 @@ typedef struct unsigned hamming : 1; /* hamming-error occurred */ } vtx_pageinfo_t; - + #endif /* _VTX_H */ diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 8eff0b53910..b3c4a60ceeb 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_9P_H #define _LINUX_VIRTIO_9P_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio console */ diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index 979524ee75b..c30c7bfbf39 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_BALLOON_H #define _LINUX_VIRTIO_BALLOON_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio_balloon */ diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 5f79a5f9de7..c1aef85243b 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_BLK_H #define _LINUX_VIRTIO_BLK_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. 
*/ #include <linux/virtio_config.h> /* The ID for virtio_block */ @@ -11,6 +13,7 @@ #define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ +#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ struct virtio_blk_config { @@ -26,6 +29,8 @@ struct virtio_blk_config __u8 heads; __u8 sectors; } geometry; + /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ + __u32 blk_size; } __attribute__((packed)); /* These two define direction. */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index f364bbf63c3..bf8ec283b23 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -1,5 +1,8 @@ #ifndef _LINUX_VIRTIO_CONFIG_H #define _LINUX_VIRTIO_CONFIG_H +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. */ + /* Virtio devices use a standardized configuration space to define their * features and pass configuration information, but each implementation can * store and access that space differently. */ @@ -15,6 +18,12 @@ /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 +/* Some virtio feature bits (currently bits 28 through 31) are reserved for the + * transport being used (eg. virtio_ring), the rest are per-device feature + * bits. */ +#define VIRTIO_TRANSPORT_F_START 28 +#define VIRTIO_TRANSPORT_F_END 32 + /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? */ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 @@ -52,9 +61,10 @@ * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). - * @set_features: confirm what device features we'll be using. + * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * feature: the first 32 feature bits + * This gives the final feature bits for the device: it can change + * the dev->feature bits if it wants. */ struct virtio_config_ops { @@ -70,7 +80,7 @@ struct virtio_config_ops void (*callback)(struct virtqueue *)); void (*del_vq)(struct virtqueue *vq); u32 (*get_features)(struct virtio_device *vdev); - void (*set_features)(struct virtio_device *vdev, u32 features); + void (*finalize_features)(struct virtio_device *vdev); }; /* If driver didn't advertise the feature, it will never appear. */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index ed2d4ead7eb..19a0da0dba4 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -1,6 +1,8 @@ #ifndef _LINUX_VIRTIO_CONSOLE_H #define _LINUX_VIRTIO_CONSOLE_H #include <linux/virtio_config.h> +/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so + * anyone can use the definitions to implement compatible drivers/servers. */ /* The ID for virtio console */ #define VIRTIO_ID_CONSOLE 3 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 38c0571820f..5e33761b9b8 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_NET_H #define _LINUX_VIRTIO_NET_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. 
*/ #include <linux/virtio_config.h> /* The ID for virtio_net */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index b3151659cf4..cdef3574293 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h @@ -9,9 +9,8 @@ * Authors: * Anthony Liguori <aliguori@us.ibm.com> * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #ifndef _LINUX_VIRTIO_PCI_H diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index abe481ed990..c4a598fb382 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -120,6 +120,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, void (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq)); void vring_del_virtqueue(struct virtqueue *vq); +/* Filter out transport-specific feature bits. */ +void vring_transport_features(struct virtio_device *vdev); irqreturn_t vring_interrupt(int irq, void *_vq); #endif /* __KERNEL__ */ diff --git a/include/linux/virtio_rng.h b/include/linux/virtio_rng.h index 331afb6c9f6..1a85dab8a94 100644 --- a/include/linux/virtio_rng.h +++ b/include/linux/virtio_rng.h @@ -1,5 +1,7 @@ #ifndef _LINUX_VIRTIO_RNG_H #define _LINUX_VIRTIO_RNG_H +/* This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. */ #include <linux/virtio_config.h> /* The ID for virtio_rng */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index e83b69346d2..58334d43951 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -44,6 +44,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_VM_EVENT_ITEMS }; +extern const struct seq_operations fragmentation_op; +extern const struct seq_operations pagetypeinfo_op; +extern const struct seq_operations zoneinfo_op; +extern const struct seq_operations vmstat_op; +extern int sysctl_stat_interval; + #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 9448ffbdcbf..1c78d56c57e 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -12,6 +12,7 @@ #include <linux/mutex.h> #include <linux/console_struct.h> #include <linux/mm.h> +#include <linux/consolemap.h> /* * Presently, a lot of graphics programs do not restore the contents of @@ -54,6 +55,7 @@ void redraw_screen(struct vc_data *vc, int is_switch); struct tty_struct; int tioclinux(struct tty_struct *tty, unsigned long arg); +#ifdef CONFIG_CONSOLE_TRANSLATIONS /* consolemap.c */ struct unimapinit; @@ -71,6 +73,24 @@ void con_free_unimap(struct vc_data *vc); void con_protect_unimap(struct vc_data *vc, int rdonly); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); +#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ + ((vc)->vc_toggle_meta ? 
0x80 : 0)]) +#else +#define con_set_trans_old(arg) (0) +#define con_get_trans_old(arg) (-EINVAL) +#define con_set_trans_new(arg) (0) +#define con_get_trans_new(arg) (-EINVAL) +#define con_clear_unimap(vc, ui) (0) +#define con_set_unimap(vc, ct, list) (0) +#define con_set_default_unimap(vc) (0) +#define con_copy_unimap(d, s) (0) +#define con_get_unimap(vc, ct, uct, list) (-EINVAL) +#define con_free_unimap(vc) do { ; } while (0) +#define con_protect_unimap(vc, rdonly) do { ; } while (0) + +#define vc_translate(vc, c) (c) +#endif + /* vt.c */ int vt_waitactive(int vt); void change_console(struct vc_data *new_vc); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 542526c6e8e..5c158c477ac 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -179,6 +179,8 @@ __create_workqueue_key(const char *name, int singlethread, extern void destroy_workqueue(struct workqueue_struct *wq); extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); +extern int queue_work_on(int cpu, struct workqueue_struct *wq, + struct work_struct *work); extern int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, @@ -188,6 +190,7 @@ extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); extern int schedule_work(struct work_struct *work); +extern int schedule_work_on(int cpu, struct work_struct *work); extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); @@ -198,6 +201,8 @@ extern int keventd_up(void); extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); +extern int flush_work(struct work_struct *work); + extern int cancel_work_sync(struct work_struct *work); /* |
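A closing illustration (not part of this patch): the queue_work_on(), schedule_work_on() and flush_work() declarations added to <linux/workqueue.h> above let a caller pin a work item to a specific CPU and wait for that single item instead of flushing the whole queue. A minimal sketch, with the workqueue name and work function invented for the example:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *stats_wq;
static struct work_struct stats_work;

static void stats_work_fn(struct work_struct *work)
{
	/* runs in process context on the CPU it was queued to */
}

static int __init stats_init(void)
{
	stats_wq = create_workqueue("stats");
	if (!stats_wq)
		return -ENOMEM;

	INIT_WORK(&stats_work, stats_work_fn);

	/* run this item on CPU 0 rather than on whichever CPU we happen to be */
	queue_work_on(0, stats_wq, &stats_work);

	/* wait for that single item only, not the entire workqueue */
	flush_work(&stats_work);

	return 0;
}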