Diffstat (limited to 'include/linux')
300 files changed, 8188 insertions, 3557 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h index 3209dd46ea7..b24ff086a66 100644 --- a/include/linux/8250_pci.h +++ b/include/linux/8250_pci.h @@ -31,7 +31,7 @@ struct pciserial_board { struct serial_private; struct serial_private * -pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board); +pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); void pciserial_remove_ports(struct serial_private *priv); void pciserial_suspend_ports(struct serial_private *priv); void pciserial_resume_ports(struct serial_private *priv); diff --git a/include/linux/Kbuild b/include/linux/Kbuild index e531783e5d7..12e9a2957ca 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -56,8 +56,6 @@ header-y += dlm_device.h header-y += dlm_netlink.h header-y += dm-ioctl.h header-y += dn.h -header-y += dqblk_v1.h -header-y += dqblk_v2.h header-y += dqblk_xfs.h header-y += efs_fs_sb.h header-y += elf-fdpic.h @@ -134,8 +132,6 @@ header-y += posix_types.h header-y += ppdev.h header-y += prctl.h header-y += qnxtypes.h -header-y += quotaio_v1.h -header-y += quotaio_v2.h header-y += radeonfb.h header-y += raw.h header-y += resource.h @@ -183,7 +179,6 @@ unifdef-y += auto_fs.h unifdef-y += auxvec.h unifdef-y += binfmts.h unifdef-y += blktrace_api.h -unifdef-y += byteorder.h unifdef-y += capability.h unifdef-y += capi.h unifdef-y += cciss_ioctl.h @@ -313,6 +308,7 @@ unifdef-y += ptrace.h unifdef-y += qnx4_fs.h unifdef-y += quota.h unifdef-y += random.h +unifdef-y += irqnr.h unifdef-y += reboot.h unifdef-y += reiserfs_fs.h unifdef-y += reiserfs_xattr.h @@ -375,3 +371,5 @@ unifdef-y += xattr.h unifdef-y += xfrm.h objhdr-y += version.h +header-y += wimax.h +header-y += wimax/ diff --git a/include/linux/aio.h b/include/linux/aio.h index f6b8cf99b59..b16a957030f 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -5,6 +5,7 @@ #include <linux/workqueue.h> #include <linux/aio_abi.h> #include <linux/uio.h> +#include <linux/rcupdate.h> #include <asm/atomic.h> @@ -183,7 +184,7 @@ struct kioctx { /* This needs improving */ unsigned long user_id; - struct kioctx *next; + struct hlist_node list; wait_queue_head_t wait; @@ -199,6 +200,8 @@ struct kioctx { struct aio_ring_info ring_info; struct delayed_work wq; + + struct rcu_head rcu_head; }; /* prototypes */ diff --git a/include/linux/async.h b/include/linux/async.h new file mode 100644 index 00000000000..c4ecacd0b32 --- /dev/null +++ b/include/linux/async.h @@ -0,0 +1,25 @@ +/* + * async.h: Asynchronous function calls for boot performance + * + * (C) Copyright 2009 Intel Corporation + * Author: Arjan van de Ven <arjan@linux.intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
+ */ + +#include <linux/types.h> +#include <linux/list.h> + +typedef u64 async_cookie_t; +typedef void (async_func_ptr) (void *data, async_cookie_t cookie); + +extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); +extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list); +extern void async_synchronize_full(void); +extern void async_synchronize_full_special(struct list_head *list); +extern void async_synchronize_cookie(async_cookie_t cookie); +extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list); + diff --git a/include/linux/atm.h b/include/linux/atm.h index c791ddd9693..d3b292174ae 100644 --- a/include/linux/atm.h +++ b/include/linux/atm.h @@ -231,10 +231,21 @@ static __inline__ int atmpvc_addr_in_use(struct sockaddr_atmpvc addr) */ struct atmif_sioc { - int number; - int length; - void __user *arg; + int number; + int length; + void __user *arg; }; +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +struct compat_atmif_sioc { + int number; + int length; + compat_uptr_t arg; +}; +#endif +#endif + typedef unsigned short atm_backend_t; #endif diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index a3d07c29d16..086e5c362d3 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -100,6 +100,10 @@ struct atm_dev_stats { /* use backend to make new if */ #define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf) /* add party to p2mp call */ +#ifdef CONFIG_COMPAT +/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */ +#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf) +#endif #define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int) /* drop party from p2mp call */ @@ -224,6 +228,13 @@ struct atm_cirange { extern struct proc_dir_entry *atm_proc_root; #endif +#ifdef CONFIG_COMPAT +#include <linux/compat.h> +struct compat_atm_iobuf { + int length; + compat_uptr_t buffer; +}; +#endif struct k_atm_aal_stats { #define __HANDLE_ITEM(i) atomic_t i @@ -379,6 +390,10 @@ struct atmdev_ops { /* only send is required */ int (*open)(struct atm_vcc *vcc); void (*close)(struct atm_vcc *vcc); int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg); +#ifdef CONFIG_COMPAT + int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, + void __user *arg); +#endif int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, void __user *optval,int optlen); int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h new file mode 100644 index 00000000000..2a2213eefd8 --- /dev/null +++ b/include/linux/atmel-mci.h @@ -0,0 +1,39 @@ +#ifndef __LINUX_ATMEL_MCI_H +#define __LINUX_ATMEL_MCI_H + +#define ATMEL_MCI_MAX_NR_SLOTS 2 + +struct dma_slave; + +/** + * struct mci_slot_pdata - board-specific per-slot configuration + * @bus_width: Number of data lines wired up the slot + * @detect_pin: GPIO pin wired to the card detect switch + * @wp_pin: GPIO pin wired to the write protect sensor + * + * If a given slot is not present on the board, @bus_width should be + * set to 0. The other fields are ignored in this case. + * + * Any pins that aren't available should be set to a negative value. + * + * Note that support for multiple slots is experimental -- some cards + * might get upset if we don't get the clock management exactly right. + * But in most cases, it should work just fine. 
+ */ +struct mci_slot_pdata { + unsigned int bus_width; + int detect_pin; + int wp_pin; +}; + +/** + * struct mci_platform_data - board-specific MMC/SDcard configuration + * @dma_slave: DMA slave interface to use in data transfers, or NULL. + * @slot: Per-slot configuration data. + */ +struct mci_platform_data { + struct dma_slave *dma_slave; + struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; +}; + +#endif /* __LINUX_ATMEL_MCI_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 8f0672d13eb..67e5dbfc296 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -99,6 +99,8 @@ #define AUDIT_OBJ_PID 1318 /* ptrace target */ #define AUDIT_TTY 1319 /* Input on an administrative TTY */ #define AUDIT_EOE 1320 /* End of multi-record event */ +#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */ +#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ @@ -245,6 +247,18 @@ #define AUDIT_GREATER_THAN_OR_EQUAL (AUDIT_GREATER_THAN|AUDIT_EQUAL) #define AUDIT_OPERATORS (AUDIT_EQUAL|AUDIT_NOT_EQUAL|AUDIT_BIT_MASK) +enum { + Audit_equal, + Audit_not_equal, + Audit_bitmask, + Audit_bittest, + Audit_lt, + Audit_gt, + Audit_le, + Audit_ge, + Audit_bad +}; + /* Status symbols */ /* Mask values */ #define AUDIT_STATUS_ENABLED 0x0001 @@ -371,6 +385,8 @@ struct audit_krule { struct audit_watch *watch; /* associated watch */ struct audit_tree *tree; /* associated watched tree */ struct list_head rlist; /* entry in audit_{watch,tree}.rules list */ + struct list_head list; /* for AUDIT_LIST* purposes only */ + u64 prio; }; struct audit_field { @@ -441,67 +457,74 @@ extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); #define audit_get_loginuid(t) ((t)->loginuid) #define audit_get_sessionid(t) ((t)->sessionid) extern void audit_log_task_context(struct audit_buffer *ab); -extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp); -extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); +extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); +extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); extern int audit_bprm(struct linux_binprm *bprm); -extern int audit_socketcall(int nargs, unsigned long *args); +extern void audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); -extern int __audit_fd_pair(int fd1, int fd2); +extern void __audit_fd_pair(int fd1, int fd2); extern int audit_set_macxattr(const char *name); -extern int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr); -extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout); -extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout); -extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification); -extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); - -static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) +extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); +extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); +extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification); +extern void __audit_mq_getsetattr(mqd_t 
mqdes, struct mq_attr *mqstat); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); + +static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { if (unlikely(!audit_dummy_context())) - return __audit_ipc_obj(ipcp); - return 0; + __audit_ipc_obj(ipcp); } -static inline int audit_fd_pair(int fd1, int fd2) +static inline void audit_fd_pair(int fd1, int fd2) { if (unlikely(!audit_dummy_context())) - return __audit_fd_pair(fd1, fd2); - return 0; + __audit_fd_pair(fd1, fd2); } -static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) +static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) { if (unlikely(!audit_dummy_context())) - return __audit_ipc_set_perm(qbytes, uid, gid, mode); - return 0; + __audit_ipc_set_perm(qbytes, uid, gid, mode); } -static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) +static inline void audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr) { if (unlikely(!audit_dummy_context())) - return __audit_mq_open(oflag, mode, u_attr); - return 0; + __audit_mq_open(oflag, mode, attr); } -static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) +static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout) { if (unlikely(!audit_dummy_context())) - return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); - return 0; + __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout); } -static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) +static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification) { if (unlikely(!audit_dummy_context())) - return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); - return 0; + __audit_mq_notify(mqdes, notification); } -static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) +static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) { if (unlikely(!audit_dummy_context())) - return __audit_mq_notify(mqdes, u_notification); - return 0; + __audit_mq_getsetattr(mqdes, mqstat); } -static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) + +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) { if (unlikely(!audit_dummy_context())) - return __audit_mq_getsetattr(mqdes, mqstat); + return __audit_log_bprm_fcaps(bprm, new, old); return 0; } + +static inline void audit_log_capset(pid_t pid, const struct cred *new, + const struct cred *old) +{ + if (unlikely(!audit_dummy_context())) + __audit_log_capset(pid, new, old); +} + extern int audit_n_rules; extern int audit_signals; #else @@ -522,18 +545,19 @@ extern int audit_signals; #define audit_get_loginuid(t) (-1) #define audit_get_sessionid(t) (-1) #define audit_log_task_context(b) do { ; } while (0) -#define audit_ipc_obj(i) ({ 0; }) -#define audit_ipc_set_perm(q,u,g,m) ({ 0; }) +#define audit_ipc_obj(i) ((void)0) +#define audit_ipc_set_perm(q,u,g,m) ((void)0) #define audit_bprm(p) ({ 0; }) -#define audit_socketcall(n,a) ({ 0; }) -#define audit_fd_pair(n,a) ({ 0; }) +#define audit_socketcall(n,a) ((void)0) +#define 
audit_fd_pair(n,a) ((void)0) #define audit_sockaddr(len, addr) ({ 0; }) #define audit_set_macxattr(n) do { ; } while (0) -#define audit_mq_open(o,m,a) ({ 0; }) -#define audit_mq_timedsend(d,l,p,t) ({ 0; }) -#define audit_mq_timedreceive(d,l,p,t) ({ 0; }) -#define audit_mq_notify(d,n) ({ 0; }) -#define audit_mq_getsetattr(d,s) ({ 0; }) +#define audit_mq_open(o,m,a) ((void)0) +#define audit_mq_sendrecv(d,l,p,t) ((void)0) +#define audit_mq_notify(d,n) ((void)0) +#define audit_mq_getsetattr(d,s) ((void)0) +#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) +#define audit_log_capset(pid, ncr, ocr) ((void)0) #define audit_ptrace(t) ((void)0) #define audit_n_rules 0 #define audit_signals 0 diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h index f4d05ccd731..91a773993a5 100644 --- a/include/linux/auto_dev-ioctl.h +++ b/include/linux/auto_dev-ioctl.h @@ -10,6 +10,7 @@ #ifndef _LINUX_AUTO_DEV_IOCTL_H #define _LINUX_AUTO_DEV_IOCTL_H +#include <linux/string.h> #include <linux/types.h> #define AUTOFS_DEVICE_NAME "autofs" @@ -25,6 +26,60 @@ * An ioctl interface for autofs mount point control. */ +struct args_protover { + __u32 version; +}; + +struct args_protosubver { + __u32 sub_version; +}; + +struct args_openmount { + __u32 devid; +}; + +struct args_ready { + __u32 token; +}; + +struct args_fail { + __u32 token; + __s32 status; +}; + +struct args_setpipefd { + __s32 pipefd; +}; + +struct args_timeout { + __u64 timeout; +}; + +struct args_requester { + __u32 uid; + __u32 gid; +}; + +struct args_expire { + __u32 how; +}; + +struct args_askumount { + __u32 may_umount; +}; + +struct args_ismountpoint { + union { + struct args_in { + __u32 type; + } in; + struct args_out { + __u32 devid; + __u32 magic; + } out; + }; +}; + /* * All the ioctls use this structure. 
* When sending a path size must account for the total length @@ -39,20 +94,32 @@ struct autofs_dev_ioctl { * including this struct */ __s32 ioctlfd; /* automount command fd */ - __u32 arg1; /* Command parameters */ - __u32 arg2; + /* Command parameters */ + + union { + struct args_protover protover; + struct args_protosubver protosubver; + struct args_openmount openmount; + struct args_ready ready; + struct args_fail fail; + struct args_setpipefd setpipefd; + struct args_timeout timeout; + struct args_requester requester; + struct args_expire expire; + struct args_askumount askumount; + struct args_ismountpoint ismountpoint; + }; char path[0]; }; static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in) { + memset(in, 0, sizeof(struct autofs_dev_ioctl)); in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR; in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR; in->size = sizeof(struct autofs_dev_ioctl); in->ioctlfd = -1; - in->arg1 = 0; - in->arg2 = 0; return; } diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 2253716d4b9..55fa478bd63 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -29,10 +29,64 @@ #define AUTOFS_EXP_IMMEDIATE 1 #define AUTOFS_EXP_LEAVES 2 -#define AUTOFS_TYPE_ANY 0x0000 -#define AUTOFS_TYPE_INDIRECT 0x0001 -#define AUTOFS_TYPE_DIRECT 0x0002 -#define AUTOFS_TYPE_OFFSET 0x0004 +#define AUTOFS_TYPE_ANY 0U +#define AUTOFS_TYPE_INDIRECT 1U +#define AUTOFS_TYPE_DIRECT 2U +#define AUTOFS_TYPE_OFFSET 4U + +static inline void set_autofs_type_indirect(unsigned int *type) +{ + *type = AUTOFS_TYPE_INDIRECT; + return; +} + +static inline unsigned int autofs_type_indirect(unsigned int type) +{ + return (type == AUTOFS_TYPE_INDIRECT); +} + +static inline void set_autofs_type_direct(unsigned int *type) +{ + *type = AUTOFS_TYPE_DIRECT; + return; +} + +static inline unsigned int autofs_type_direct(unsigned int type) +{ + return (type == AUTOFS_TYPE_DIRECT); +} + +static inline void set_autofs_type_offset(unsigned int *type) +{ + *type = AUTOFS_TYPE_OFFSET; + return; +} + +static inline unsigned int autofs_type_offset(unsigned int type) +{ + return (type == AUTOFS_TYPE_OFFSET); +} + +static inline unsigned int autofs_type_trigger(unsigned int type) +{ + return (type == AUTOFS_TYPE_DIRECT || type == AUTOFS_TYPE_OFFSET); +} + +/* + * This isn't really a type as we use it to say "no type set" to + * indicate we want to search for "any" mount in the + * autofs_dev_ioctl_ismountpoint() device ioctl function. + */ +static inline void set_autofs_type_any(unsigned int *type) +{ + *type = AUTOFS_TYPE_ANY; + return; +} + +static inline unsigned int autofs_type_any(unsigned int type) +{ + return (type == AUTOFS_TYPE_ANY); +} /* Daemon notification packet types */ enum autofs_notify { diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h index d7afa9dd663..f3b5d4e3a2a 100644 --- a/include/linux/auxvec.h +++ b/include/linux/auxvec.h @@ -23,16 +23,16 @@ #define AT_PLATFORM 15 /* string identifying CPU for optimizations */ #define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */ #define AT_CLKTCK 17 /* frequency at which times() increments */ - +/* AT_* values 18 through 22 are reserved */ #define AT_SECURE 23 /* secure mode boolean */ - #define AT_BASE_PLATFORM 24 /* string identifying real platform, may * differ from AT_PLATFORM. 
*/ +#define AT_RANDOM 25 /* address of 16 random bytes */ #define AT_EXECFN 31 /* filename of program */ #ifdef __KERNEL__ -#define AT_VECTOR_SIZE_BASE 18 /* NEW_AUX_ENT entries in auxiliary table */ +#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ #endif diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 7394b5b349f..77b4a9e4600 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -18,6 +18,7 @@ struct pt_regs; #define BINPRM_BUF_SIZE 128 #ifdef __KERNEL__ +#include <linux/list.h> #define CORENAME_MAX_SIZE 128 @@ -35,16 +36,20 @@ struct linux_binprm{ struct mm_struct *mm; unsigned long p; /* current top of mem */ unsigned int sh_bang:1, - misc_bang:1; + misc_bang:1, + cred_prepared:1,/* true if creds already prepared (multiple + * preps happen for interpreters) */ + cap_effective:1;/* true if has elevated effective capabilities, + * false if not; except for init which inherits + * its parent's caps anyway */ #ifdef __alpha__ unsigned int taso:1; #endif unsigned int recursion_depth; struct file * file; - int e_uid, e_gid; - kernel_cap_t cap_post_exec_permitted; - bool cap_effective; - void *security; + struct cred *cred; /* new credentials */ + int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ + unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; char * filename; /* Name of binary as seen by procps */ char * interp; /* Name of the binary really executed. Most @@ -101,8 +106,8 @@ extern int setup_arg_pages(struct linux_binprm * bprm, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); -extern void compute_creds(struct linux_binprm *binprm); -extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); +extern void install_exec_creds(struct linux_binprm *bprm); +extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); extern int set_binfmt(struct linux_binfmt *new); extern void free_bprm(struct linux_binprm *); diff --git a/include/linux/bio.h b/include/linux/bio.h index 6a642098e5c..18462c5b8ff 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -90,10 +90,11 @@ struct bio { unsigned int bi_comp_cpu; /* completion CPU */ + atomic_t bi_cnt; /* pin count */ + struct bio_vec *bi_io_vec; /* the actual vec list */ bio_end_io_t *bi_end_io; - atomic_t bi_cnt; /* pin count */ void *bi_private; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -101,6 +102,13 @@ struct bio { #endif bio_destructor_t *bi_destructor; /* destructor */ + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. This member + * MUST obviously be kept at the very end of the bio. 
+ */ + struct bio_vec bi_inline_vecs[0]; }; /* @@ -117,6 +125,7 @@ struct bio { #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ +#define BIO_QUIET 11 /* Make BIO Quiet */ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) /* @@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio) return NULL; } +static inline int bio_has_allocated_vec(struct bio *bio) +{ + return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs; +} + /* * will die */ @@ -332,7 +346,7 @@ struct bio_pair { extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); extern void bio_pair_release(struct bio_pair *dbio); -extern struct bio_set *bioset_create(int, int); +extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); extern struct bio *bio_alloc(gfp_t, int); @@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); +extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); extern unsigned int bvec_nr_vecs(unsigned short idx); /* @@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) */ #define BIO_POOL_SIZE 2 #define BIOVEC_NR_POOLS 6 +#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { + struct kmem_cache *bio_slab; + unsigned int front_pad; + mempool_t *bio_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t *bio_integrity_pool; #endif - mempool_t *bvec_pools[BIOVEC_NR_POOLS]; + mempool_t *bvec_pool; }; struct biovec_slab { @@ -411,6 +430,7 @@ struct biovec_slab { }; extern struct bio_set *fs_bio_set; +extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly; /* * a small number of entries is fine, not going to be performance critical. 
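The bio.h hunks above reserve a zero-length bi_inline_vecs[] array at the very end of struct bio so that a small vector can be carved out of the same allocation as the bio itself, and bio_has_allocated_vec() tells the two cases apart by a simple pointer comparison. Below is a minimal user-space model of that layout (simplified stand-in types and hypothetical names, assuming an inline threshold of the caller's choosing -- not the kernel implementation itself):

#include <stdlib.h>

struct vec { void *page; unsigned int len, offset; };

/* Simplified stand-in for struct bio: the flexible array member must stay last. */
struct mini_bio {
	unsigned short	nr_vecs;
	struct vec	*io_vec;	/* points at inline_vecs or a separate allocation */
	struct vec	inline_vecs[];	/* carved out of the same allocation when small */
};

/* Mirrors the idea of bio_has_allocated_vec(): true only when io_vec was
 * allocated separately rather than pointing at the inline tail. */
static int mini_bio_has_allocated_vec(const struct mini_bio *b)
{
	return b->io_vec && b->io_vec != b->inline_vecs;
}

static struct mini_bio *mini_bio_alloc(unsigned short nr_vecs, unsigned short inline_max)
{
	size_t inline_n = nr_vecs <= inline_max ? nr_vecs : 0;
	struct mini_bio *b = calloc(1, sizeof(*b) + inline_n * sizeof(struct vec));

	if (!b)
		return NULL;
	b->nr_vecs = nr_vecs;
	if (inline_n)
		b->io_vec = b->inline_vecs;			/* single allocation */
	else
		b->io_vec = calloc(nr_vecs, sizeof(struct vec));/* separate allocation */
	return b;
}

A free path needs the same pointer check before releasing io_vec separately, which is the distinction the new helper makes cheap.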
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index a08c33a26ca..2878811c613 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -137,9 +137,12 @@ extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ ) +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + static inline void bitmap_zero(unsigned long *dst, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = 0UL; else { int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); @@ -150,7 +153,7 @@ static inline void bitmap_zero(unsigned long *dst, int nbits) static inline void bitmap_fill(unsigned long *dst, int nbits) { size_t nlongs = BITS_TO_LONGS(nbits); - if (nlongs > 1) { + if (!small_const_nbits(nbits)) { int len = (nlongs - 1) * sizeof(unsigned long); memset(dst, 0xff, len); } @@ -160,7 +163,7 @@ static inline void bitmap_fill(unsigned long *dst, int nbits) static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src; else { int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); @@ -171,7 +174,7 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 & *src2; else __bitmap_and(dst, src1, src2, nbits); @@ -180,7 +183,7 @@ static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 | *src2; else __bitmap_or(dst, src1, src2, nbits); @@ -189,7 +192,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 ^ *src2; else __bitmap_xor(dst, src1, src2, nbits); @@ -198,7 +201,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src1 & ~(*src2); else __bitmap_andnot(dst, src1, src2, nbits); @@ -207,7 +210,7 @@ static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); else __bitmap_complement(dst, src, nbits); @@ -216,7 +219,7 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr static inline int bitmap_equal(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! 
((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_equal(src1, src2, nbits); @@ -225,7 +228,7 @@ static inline int bitmap_equal(const unsigned long *src1, static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; else return __bitmap_intersects(src1, src2, nbits); @@ -234,7 +237,7 @@ static inline int bitmap_intersects(const unsigned long *src1, static inline int bitmap_subset(const unsigned long *src1, const unsigned long *src2, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_subset(src1, src2, nbits); @@ -242,7 +245,7 @@ static inline int bitmap_subset(const unsigned long *src1, static inline int bitmap_empty(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_empty(src, nbits); @@ -250,7 +253,7 @@ static inline int bitmap_empty(const unsigned long *src, int nbits) static inline int bitmap_full(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); else return __bitmap_full(src, nbits); @@ -258,7 +261,7 @@ static inline int bitmap_full(const unsigned long *src, int nbits) static inline int bitmap_weight(const unsigned long *src, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } @@ -266,7 +269,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits) static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, int n, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = *src >> n; else __bitmap_shift_right(dst, src, n, nbits); @@ -275,7 +278,7 @@ static inline void bitmap_shift_right(unsigned long *dst, static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, int n, int nbits) { - if (nbits <= BITS_PER_LONG) + if (small_const_nbits(nbits)) *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); else __bitmap_shift_left(dst, src, n, nbits); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 024f2b02724..61829139795 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -134,9 +134,20 @@ extern unsigned long find_first_bit(const unsigned long *addr, */ extern unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size); - #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ +#ifdef CONFIG_GENERIC_FIND_LAST_BIT +/** + * find_last_bit - find the last set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit, or size. 
+ */ +extern unsigned long find_last_bit(const unsigned long *addr, + unsigned long size); +#endif /* CONFIG_GENERIC_FIND_LAST_BIT */ + #ifdef CONFIG_GENERIC_FIND_NEXT_BIT /** diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 031a315c050..044467ef7b1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -26,7 +26,6 @@ struct scsi_ioctl_command; struct request_queue; struct elevator_queue; -typedef struct elevator_queue elevator_t; struct request_pm_state; struct blk_trace; struct request; @@ -313,7 +312,7 @@ struct request_queue */ struct list_head queue_head; struct request *last_merge; - elevator_t *elevator; + struct elevator_queue *elevator; /* * the queue request freelist, one for reads and one for writes @@ -449,6 +448,7 @@ struct request_queue #define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ +#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ static inline int queue_is_locked(struct request_queue *q) { @@ -522,22 +522,32 @@ enum { * TAG_FLUSH : ordering by tag w/ pre and post flushes * TAG_FUA : ordering by tag w/ pre flush and FUA write */ - QUEUE_ORDERED_NONE = 0x00, - QUEUE_ORDERED_DRAIN = 0x01, - QUEUE_ORDERED_TAG = 0x02, - - QUEUE_ORDERED_PREFLUSH = 0x10, - QUEUE_ORDERED_POSTFLUSH = 0x20, - QUEUE_ORDERED_FUA = 0x40, - - QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, - QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, - QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | - QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, + QUEUE_ORDERED_BY_DRAIN = 0x01, + QUEUE_ORDERED_BY_TAG = 0x02, + QUEUE_ORDERED_DO_PREFLUSH = 0x10, + QUEUE_ORDERED_DO_BAR = 0x20, + QUEUE_ORDERED_DO_POSTFLUSH = 0x40, + QUEUE_ORDERED_DO_FUA = 0x80, + + QUEUE_ORDERED_NONE = 0x00, + + QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, + + QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | + QUEUE_ORDERED_DO_BAR, + QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_POSTFLUSH, + QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | + QUEUE_ORDERED_DO_PREFLUSH | + QUEUE_ORDERED_DO_FUA, /* * Ordered operation sequence @@ -585,7 +595,6 @@ enum { #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) -#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors) /* rq->queuelist of dequeued request must be list_empty() */ #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) @@ -681,6 +690,8 @@ struct rq_map_data { struct page **pages; int page_order; int nr_entries; + unsigned long offset; + int null_mapped; }; struct req_iterator { @@ -855,10 +866,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 
-extern int blk_do_ordered(struct request_queue *, struct request **); +extern bool blk_do_ordered(struct request_queue *, struct request **); extern unsigned blk_ordered_cur_seq(struct request_queue *); extern unsigned blk_ordered_req_seq(struct request *); -extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); +extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); extern void blk_dump_rq_flags(struct request *, char *); @@ -977,7 +988,6 @@ static inline void put_dev_sector(Sector p) struct work_struct; int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -void kblockd_flush_work(struct work_struct *work); #define MODULE_ALIAS_BLOCKDEV(major,minor) \ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index bdf505d33e7..1dba3493d52 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -160,7 +160,6 @@ struct blk_trace { extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); -extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); extern int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct blk_user_trace_setup *buts); extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); @@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); } while (0) #define BLK_TN_MAX_MSG 128 -/** - * blk_add_trace_rq - Add a trace for a request oriented action - * @q: queue the io is for - * @rq: the source request - * @what: the action - * - * Description: - * Records an action against a request. Will log the bio offset + size. - * - **/ -static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, - u32 what) -{ - struct blk_trace *bt = q->blk_trace; - int rw = rq->cmd_flags & 0x03; - - if (likely(!bt)) - return; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - what |= BLK_TC_ACT(BLK_TC_PC); - __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); - } else { - what |= BLK_TC_ACT(BLK_TC_FS); - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL); - } -} - -/** - * blk_add_trace_bio - Add a trace for a bio oriented action - * @q: queue the io is for - * @bio: the source bio - * @what: the action - * - * Description: - * Records an action against a bio. Will log the bio offset + size. 
- * - **/ -static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, - u32 what) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); -} - -/** - * blk_add_trace_generic - Add a trace for a generic action - * @q: queue the io is for - * @bio: the source bio - * @rw: the data direction - * @what: the action - * - * Description: - * Records a simple trace - * - **/ -static inline void blk_add_trace_generic(struct request_queue *q, - struct bio *bio, int rw, u32 what) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - if (bio) - blk_add_trace_bio(q, bio, what); - else - __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); -} - -/** - * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload - * @q: queue the io is for - * @what: the action - * @bio: the source bio - * @pdu: the integer payload - * - * Description: - * Adds a trace with some integer payload. This might be an unplug - * option given as the action, with the depth at unplug time given - * as the payload - * - **/ -static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what, - struct bio *bio, unsigned int pdu) -{ - struct blk_trace *bt = q->blk_trace; - __be64 rpdu = cpu_to_be64(pdu); - - if (likely(!bt)) - return; - - if (bio) - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); - else - __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); -} - -/** - * blk_add_trace_remap - Add a trace for a remap operation - * @q: queue the io is for - * @bio: the source bio - * @dev: target device - * @from: source sector - * @to: target sector - * - * Description: - * Device mapper or raid target sometimes need to split a bio because - * it spans a stripe (or similar). Add a trace for that action. - * - **/ -static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, - dev_t dev, sector_t from, sector_t to) -{ - struct blk_trace *bt = q->blk_trace; - struct blk_io_trace_remap r; - - if (likely(!bt)) - return; - - r.device = cpu_to_be32(dev); - r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev); - r.sector = cpu_to_be64(to); - - __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); -} - -/** - * blk_add_driver_data - Add binary message with driver-specific data - * @q: queue the io is for - * @rq: io request - * @data: driver-specific data - * @len: length of driver-specific data - * - * Description: - * Some drivers might want to write driver-specific data per request. 
- * - **/ -static inline void blk_add_driver_data(struct request_queue *q, - struct request *rq, - void *data, size_t len) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - if (blk_pc_request(rq)) - __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, - rq->errors, len, data); - else - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, - 0, BLK_TA_DRV_DATA, rq->errors, len, data); -} - +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); @@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q); #else /* !CONFIG_BLK_DEV_IO_TRACE */ #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) #define blk_trace_shutdown(q) do { } while (0) -#define blk_add_trace_rq(q, rq, what) do { } while (0) -#define blk_add_trace_bio(q, rq, what) do { } while (0) -#define blk_add_trace_generic(q, rq, rw, what) do { } while (0) -#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) -#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) -#define blk_add_driver_data(q, rq, data, len) do {} while (0) #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) +#define blk_add_driver_data(q, rq, data, len) do {} while (0) #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) #define blk_trace_startstop(q, start) (-ENOTTY) #define blk_trace_remove(q) (-ENOTTY) diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h index 8607312983b..e44b88ba552 100644 --- a/include/linux/blockgroup_lock.h +++ b/include/linux/blockgroup_lock.h @@ -53,7 +53,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl) * The accessor is a macro so we can embed a blockgroup_lock into different * superblock types */ -#define sb_bgl_lock(sb, block_group) \ - (&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock) +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; +} #endif diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index 777dbf695d4..27b1bcffe40 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -2,7 +2,6 @@ #define _LINUX_BH_H extern void local_bh_disable(void); -extern void __local_bh_enable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3ce64b90118..8605f8a74df 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -35,6 +35,7 @@ enum bh_state_bits { BH_Ordered, /* ordered write */ BH_Eopnotsupp, /* operation not supported (barrier) */ BH_Unwritten, /* Buffer is allocated on disk but not written */ + BH_Quiet, /* Buffer Error Prinks to be quiet */ BH_PrivateStart,/* not a state bit, but the first bit available * for private allocation by other entities diff --git a/include/linux/byteorder.h b/include/linux/byteorder.h deleted file mode 100644 index 29f002d73d9..00000000000 --- a/include/linux/byteorder.h +++ /dev/null @@ -1,372 +0,0 @@ -#ifndef _LINUX_BYTEORDER_H -#define _LINUX_BYTEORDER_H - -#include <linux/types.h> -#include <linux/swab.h> - -#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) -# error Fix asm/byteorder.h to define one endianness -#endif - -#if 
!defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) -# error Fix asm/byteorder.h to define arch endianness -#endif - -#ifdef __LITTLE_ENDIAN -# undef __LITTLE_ENDIAN -# define __LITTLE_ENDIAN 1234 -#endif - -#ifdef __BIG_ENDIAN -# undef __BIG_ENDIAN -# define __BIG_ENDIAN 4321 -#endif - -#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD) -# define __LITTLE_ENDIAN_BITFIELD -#endif - -#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD) -# define __BIG_ENDIAN_BITFIELD -#endif - -#ifdef __LITTLE_ENDIAN -# define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) -# define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) -# define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) -# define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) -# define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) -# define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) - -# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) -# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) -# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) -# define __cpu_to_be16(x) ((__force __be16)__swab16(x)) -# define __cpu_to_be32(x) ((__force __be32)__swab32(x)) -# define __cpu_to_be64(x) ((__force __be64)__swab64(x)) -#endif - -#ifdef __BIG_ENDIAN -# define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) -# define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) -# define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) -# define __cpu_to_be16(x) ((__force __be16)(__u16)(x)) -# define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) -# define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) - -# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) -# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) -# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) -# define __cpu_to_le16(x) ((__force __le16)__swab16(x)) -# define __cpu_to_le32(x) ((__force __le32)__swab32(x)) -# define __cpu_to_le64(x) ((__force __le64)__swab64(x)) -#endif - -/* - * These helpers could be phased out over time as the base version - * handles constant folding. 
- */ -#define __constant_htonl(x) __cpu_to_be32(x) -#define __constant_ntohl(x) __be32_to_cpu(x) -#define __constant_htons(x) __cpu_to_be16(x) -#define __constant_ntohs(x) __be16_to_cpu(x) - -#define __constant_le16_to_cpu(x) __le16_to_cpu(x) -#define __constant_le32_to_cpu(x) __le32_to_cpu(x) -#define __constant_le64_to_cpu(x) __le64_to_cpu(x) -#define __constant_be16_to_cpu(x) __be16_to_cpu(x) -#define __constant_be32_to_cpu(x) __be32_to_cpu(x) -#define __constant_be64_to_cpu(x) __be64_to_cpu(x) - -#define __constant_cpu_to_le16(x) __cpu_to_le16(x) -#define __constant_cpu_to_le32(x) __cpu_to_le32(x) -#define __constant_cpu_to_le64(x) __cpu_to_le64(x) -#define __constant_cpu_to_be16(x) __cpu_to_be16(x) -#define __constant_cpu_to_be32(x) __cpu_to_be32(x) -#define __constant_cpu_to_be64(x) __cpu_to_be64(x) - -static inline void __le16_to_cpus(__u16 *p) -{ -#ifdef __BIG_ENDIAN - __swab16s(p); -#endif -} - -static inline void __cpu_to_le16s(__u16 *p) -{ -#ifdef __BIG_ENDIAN - __swab16s(p); -#endif -} - -static inline void __le32_to_cpus(__u32 *p) -{ -#ifdef __BIG_ENDIAN - __swab32s(p); -#endif -} - -static inline void __cpu_to_le32s(__u32 *p) -{ -#ifdef __BIG_ENDIAN - __swab32s(p); -#endif -} - -static inline void __le64_to_cpus(__u64 *p) -{ -#ifdef __BIG_ENDIAN - __swab64s(p); -#endif -} - -static inline void __cpu_to_le64s(__u64 *p) -{ -#ifdef __BIG_ENDIAN - __swab64s(p); -#endif -} - -static inline void __be16_to_cpus(__u16 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab16s(p); -#endif -} - -static inline void __cpu_to_be16s(__u16 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab16s(p); -#endif -} - -static inline void __be32_to_cpus(__u32 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab32s(p); -#endif -} - -static inline void __cpu_to_be32s(__u32 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab32s(p); -#endif -} - -static inline void __be64_to_cpus(__u64 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab64s(p); -#endif -} - -static inline void __cpu_to_be64s(__u64 *p) -{ -#ifdef __LITTLE_ENDIAN - __swab64s(p); -#endif -} - -static inline __u16 __le16_to_cpup(const __le16 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __u16)*p; -#else - return __swab16p((__force __u16 *)p); -#endif -} - -static inline __u32 __le32_to_cpup(const __le32 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __u32)*p; -#else - return __swab32p((__force __u32 *)p); -#endif -} - -static inline __u64 __le64_to_cpup(const __le64 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __u64)*p; -#else - return __swab64p((__force __u64 *)p); -#endif -} - -static inline __le16 __cpu_to_le16p(const __u16 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __le16)*p; -#else - return (__force __le16)__swab16p(p); -#endif -} - -static inline __le32 __cpu_to_le32p(const __u32 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __le32)*p; -#else - return (__force __le32)__swab32p(p); -#endif -} - -static inline __le64 __cpu_to_le64p(const __u64 *p) -{ -#ifdef __LITTLE_ENDIAN - return (__force __le64)*p; -#else - return (__force __le64)__swab64p(p); -#endif -} - -static inline __u16 __be16_to_cpup(const __be16 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __u16)*p; -#else - return __swab16p((__force __u16 *)p); -#endif -} - -static inline __u32 __be32_to_cpup(const __be32 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __u32)*p; -#else - return __swab32p((__force __u32 *)p); -#endif -} - -static inline __u64 __be64_to_cpup(const __be64 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __u64)*p; -#else - return __swab64p((__force __u64 *)p); -#endif -} - -static inline __be16 
__cpu_to_be16p(const __u16 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __be16)*p; -#else - return (__force __be16)__swab16p(p); -#endif -} - -static inline __be32 __cpu_to_be32p(const __u32 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __be32)*p; -#else - return (__force __be32)__swab32p(p); -#endif -} - -static inline __be64 __cpu_to_be64p(const __u64 *p) -{ -#ifdef __BIG_ENDIAN - return (__force __be64)*p; -#else - return (__force __be64)__swab64p(p); -#endif -} - -#ifdef __KERNEL__ - -# define le16_to_cpu __le16_to_cpu -# define le32_to_cpu __le32_to_cpu -# define le64_to_cpu __le64_to_cpu -# define be16_to_cpu __be16_to_cpu -# define be32_to_cpu __be32_to_cpu -# define be64_to_cpu __be64_to_cpu -# define cpu_to_le16 __cpu_to_le16 -# define cpu_to_le32 __cpu_to_le32 -# define cpu_to_le64 __cpu_to_le64 -# define cpu_to_be16 __cpu_to_be16 -# define cpu_to_be32 __cpu_to_be32 -# define cpu_to_be64 __cpu_to_be64 - -# define le16_to_cpup __le16_to_cpup -# define le32_to_cpup __le32_to_cpup -# define le64_to_cpup __le64_to_cpup -# define be16_to_cpup __be16_to_cpup -# define be32_to_cpup __be32_to_cpup -# define be64_to_cpup __be64_to_cpup -# define cpu_to_le16p __cpu_to_le16p -# define cpu_to_le32p __cpu_to_le32p -# define cpu_to_le64p __cpu_to_le64p -# define cpu_to_be16p __cpu_to_be16p -# define cpu_to_be32p __cpu_to_be32p -# define cpu_to_be64p __cpu_to_be64p - -# define le16_to_cpus __le16_to_cpus -# define le32_to_cpus __le32_to_cpus -# define le64_to_cpus __le64_to_cpus -# define be16_to_cpus __be16_to_cpus -# define be32_to_cpus __be32_to_cpus -# define be64_to_cpus __be64_to_cpus -# define cpu_to_le16s __cpu_to_le16s -# define cpu_to_le32s __cpu_to_le32s -# define cpu_to_le64s __cpu_to_le64s -# define cpu_to_be16s __cpu_to_be16s -# define cpu_to_be32s __cpu_to_be32s -# define cpu_to_be64s __cpu_to_be64s - -/* - * They have to be macros in order to do the constant folding - * correctly - if the argument passed into a inline function - * it is no longer constant according to gcc.. 
- */ -# undef ntohl -# undef ntohs -# undef htonl -# undef htons - -# define ___htonl(x) __cpu_to_be32(x) -# define ___htons(x) __cpu_to_be16(x) -# define ___ntohl(x) __be32_to_cpu(x) -# define ___ntohs(x) __be16_to_cpu(x) - -# define htonl(x) ___htonl(x) -# define ntohl(x) ___ntohl(x) -# define htons(x) ___htons(x) -# define ntohs(x) ___ntohs(x) - -static inline void le16_add_cpu(__le16 *var, u16 val) -{ - *var = cpu_to_le16(le16_to_cpup(var) + val); -} - -static inline void le32_add_cpu(__le32 *var, u32 val) -{ - *var = cpu_to_le32(le32_to_cpup(var) + val); -} - -static inline void le64_add_cpu(__le64 *var, u64 val) -{ - *var = cpu_to_le64(le64_to_cpup(var) + val); -} - -static inline void be16_add_cpu(__be16 *var, u16 val) -{ - *var = cpu_to_be16(be16_to_cpup(var) + val); -} - -static inline void be32_add_cpu(__be32 *var, u32 val) -{ - *var = cpu_to_be32(be32_to_cpup(var) + val); -} - -static inline void be64_add_cpu(__be64 *var, u64 val) -{ - *var = cpu_to_be64(be64_to_cpup(var) + val); -} - -#endif /* __KERNEL__ */ -#endif /* _LINUX_BYTEORDER_H */ diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild index fbaa7f9cee3..38437225b09 100644 --- a/include/linux/byteorder/Kbuild +++ b/include/linux/byteorder/Kbuild @@ -1,4 +1,2 @@ unifdef-y += big_endian.h unifdef-y += little_endian.h -unifdef-y += swab.h -unifdef-y += swabb.h diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h index 1cba3f3efe5..3c80fd7e8b5 100644 --- a/include/linux/byteorder/big_endian.h +++ b/include/linux/byteorder/big_endian.h @@ -9,8 +9,7 @@ #endif #include <linux/types.h> -#include <linux/byteorder/swab.h> -#include <linux/byteorder/swabb.h> +#include <linux/swab.h> #define __constant_htonl(x) ((__force __be32)(__u32)(x)) #define __constant_ntohl(x) ((__force __u32)(__be32)(x)) diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h index cedc1b5a289..83195fb8296 100644 --- a/include/linux/byteorder/little_endian.h +++ b/include/linux/byteorder/little_endian.h @@ -9,8 +9,7 @@ #endif #include <linux/types.h> -#include <linux/byteorder/swab.h> -#include <linux/byteorder/swabb.h> +#include <linux/swab.h> #define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) #define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h deleted file mode 100644 index 142134ff164..00000000000 --- a/include/linux/byteorder/swab.h +++ /dev/null @@ -1,222 +0,0 @@ -#ifndef _LINUX_BYTEORDER_SWAB_H -#define _LINUX_BYTEORDER_SWAB_H - -/* - * linux/byteorder/swab.h - * Byte-swapping, independently from CPU endianness - * swabXX[ps]?(foo) - * - * Francois-Rene Rideau <fare@tunes.org> 19971205 - * separated swab functions from cpu_to_XX, - * to clean up support for bizarre-endian architectures. - * - * Trent Piepho <xyzzy@speakeasy.org> 2007114 - * make constant-folding work, provide C versions that - * gcc can optimize better, explain different versions - * - * See asm-i386/byteorder.h and suches for examples of how to provide - * architecture-dependent optimized versions - * - */ - -#include <linux/compiler.h> - -/* Functions/macros defined, there are a lot: - * - * ___swabXX - * Generic C versions of the swab functions. - * - * ___constant_swabXX - * C versions that gcc can fold into a compile-time constant when - * the argument is a compile-time constant. - * - * __arch__swabXX[sp]? 
- * Architecture optimized versions of all the swab functions - * (including the s and p versions). These can be defined in - * asm-arch/byteorder.h. Any which are not, are defined here. - * __arch__swabXXs() is defined in terms of __arch__swabXXp(), which - * is defined in terms of __arch__swabXX(), which is in turn defined - * in terms of ___swabXX(x). - * These must be macros. They may be unsafe for arguments with - * side-effects. - * - * __fswabXX - * Inline function versions of the __arch__ macros. These _are_ safe - * if the arguments have side-effects. Note there are no s and p - * versions of these. - * - * __swabXX[sb] - * There are the ones you should actually use. The __swabXX versions - * will be a constant given a constant argument and use the arch - * specific code (if any) for non-constant arguments. The s and p - * versions always use the arch specific code (constant folding - * doesn't apply). They are safe to use with arguments with - * side-effects. - * - * swabXX[sb] - * Nicknames for __swabXX[sb] to use in the kernel. - */ - -/* casts are necessary for constants, because we never know how for sure - * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. - */ - -static __inline__ __attribute_const__ __u16 ___swab16(__u16 x) -{ - return x<<8 | x>>8; -} -static __inline__ __attribute_const__ __u32 ___swab32(__u32 x) -{ - return x<<24 | x>>24 | - (x & (__u32)0x0000ff00UL)<<8 | - (x & (__u32)0x00ff0000UL)>>8; -} -static __inline__ __attribute_const__ __u64 ___swab64(__u64 x) -{ - return x<<56 | x>>56 | - (x & (__u64)0x000000000000ff00ULL)<<40 | - (x & (__u64)0x0000000000ff0000ULL)<<24 | - (x & (__u64)0x00000000ff000000ULL)<< 8 | - (x & (__u64)0x000000ff00000000ULL)>> 8 | - (x & (__u64)0x0000ff0000000000ULL)>>24 | - (x & (__u64)0x00ff000000000000ULL)>>40; -} - -#define ___constant_swab16(x) \ - ((__u16)( \ - (((__u16)(x) & (__u16)0x00ffU) << 8) | \ - (((__u16)(x) & (__u16)0xff00U) >> 8) )) -#define ___constant_swab32(x) \ - ((__u32)( \ - (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ - (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ - (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ - (((__u32)(x) & (__u32)0xff000000UL) >> 24) )) -#define ___constant_swab64(x) \ - ((__u64)( \ - (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ - (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ - (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ - (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ - (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ - (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ - (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ - (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) )) - -/* - * provide defaults when no architecture-specific optimization is detected - */ -#ifndef __arch__swab16 -# define __arch__swab16(x) ___swab16(x) -#endif -#ifndef __arch__swab32 -# define __arch__swab32(x) ___swab32(x) -#endif -#ifndef __arch__swab64 -# define __arch__swab64(x) ___swab64(x) -#endif - -#ifndef __arch__swab16p -# define __arch__swab16p(x) __arch__swab16(*(x)) -#endif -#ifndef __arch__swab32p -# define __arch__swab32p(x) __arch__swab32(*(x)) -#endif -#ifndef __arch__swab64p -# define __arch__swab64p(x) __arch__swab64(*(x)) -#endif - -#ifndef __arch__swab16s -# define __arch__swab16s(x) ((void)(*(x) = __arch__swab16p(x))) -#endif -#ifndef __arch__swab32s -# define __arch__swab32s(x) ((void)(*(x) = __arch__swab32p(x))) -#endif -#ifndef __arch__swab64s -# define 
__arch__swab64s(x) ((void)(*(x) = __arch__swab64p(x))) -#endif - - -/* - * Allow constant folding - */ -#if defined(__GNUC__) && defined(__OPTIMIZE__) -# define __swab16(x) \ -(__builtin_constant_p((__u16)(x)) ? \ - ___constant_swab16((x)) : \ - __fswab16((x))) -# define __swab32(x) \ -(__builtin_constant_p((__u32)(x)) ? \ - ___constant_swab32((x)) : \ - __fswab32((x))) -# define __swab64(x) \ -(__builtin_constant_p((__u64)(x)) ? \ - ___constant_swab64((x)) : \ - __fswab64((x))) -#else -# define __swab16(x) __fswab16(x) -# define __swab32(x) __fswab32(x) -# define __swab64(x) __fswab64(x) -#endif /* OPTIMIZE */ - - -static __inline__ __attribute_const__ __u16 __fswab16(__u16 x) -{ - return __arch__swab16(x); -} -static __inline__ __u16 __swab16p(const __u16 *x) -{ - return __arch__swab16p(x); -} -static __inline__ void __swab16s(__u16 *addr) -{ - __arch__swab16s(addr); -} - -static __inline__ __attribute_const__ __u32 __fswab32(__u32 x) -{ - return __arch__swab32(x); -} -static __inline__ __u32 __swab32p(const __u32 *x) -{ - return __arch__swab32p(x); -} -static __inline__ void __swab32s(__u32 *addr) -{ - __arch__swab32s(addr); -} - -#ifdef __BYTEORDER_HAS_U64__ -static __inline__ __attribute_const__ __u64 __fswab64(__u64 x) -{ -# ifdef __SWAB_64_THRU_32__ - __u32 h = x >> 32; - __u32 l = x & ((1ULL<<32)-1); - return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h))); -# else - return __arch__swab64(x); -# endif -} -static __inline__ __u64 __swab64p(const __u64 *x) -{ - return __arch__swab64p(x); -} -static __inline__ void __swab64s(__u64 *addr) -{ - __arch__swab64s(addr); -} -#endif /* __BYTEORDER_HAS_U64__ */ - -#if defined(__KERNEL__) -#define swab16 __swab16 -#define swab32 __swab32 -#define swab64 __swab64 -#define swab16p __swab16p -#define swab32p __swab32p -#define swab64p __swab64p -#define swab16s __swab16s -#define swab32s __swab32s -#define swab64s __swab64s -#endif - -#endif /* _LINUX_BYTEORDER_SWAB_H */ diff --git a/include/linux/byteorder/swabb.h b/include/linux/byteorder/swabb.h deleted file mode 100644 index 8c780c7d779..00000000000 --- a/include/linux/byteorder/swabb.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _LINUX_BYTEORDER_SWABB_H -#define _LINUX_BYTEORDER_SWABB_H - -/* - * linux/byteorder/swabb.h - * SWAp Bytes Bizarrely - * swaHHXX[ps]?(foo) - * - * Support for obNUXIous pdp-endian and other bizarre architectures. - * Will Linux ever run on such ancient beasts? if not, this file - * will be but a programming pearl. Still, it's a reminder that we - * shouldn't be making too many assumptions when trying to be portable. - * - */ - -/* - * Meaning of the names I chose (vaxlinux people feel free to correct them): - * swahw32 swap 16-bit half-words in a 32-bit word - * swahb32 swap 8-bit halves of each 16-bit half-word in a 32-bit word - * - * No 64-bit support yet. I don't know NUXI conventions for long longs. - * I guarantee it will be a mess when it's there, though :-> - * It will be even worse if there are conflicting 64-bit conventions. - * Hopefully, no one ever used 64-bit objects on NUXI machines. 
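As a concrete illustration of the three 32-bit swaps defined in these (now removed) headers, here is a small standalone user-space sketch using the same generic C expressions; the value 0x12345678 is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)	/* full byte reversal */
{
	return x << 24 | x >> 24 |
	       (x & 0x0000ff00u) << 8 |
	       (x & 0x00ff0000u) >> 8;
}

static uint32_t swahw32(uint32_t x)	/* swap the two 16-bit halfwords */
{
	return (x & 0x0000ffffu) << 16 | (x & 0xffff0000u) >> 16;
}

static uint32_t swahb32(uint32_t x)	/* swap the bytes within each halfword */
{
	return (x & 0x00ff00ffu) << 8 | (x & 0xff00ff00u) >> 8;
}

int main(void)
{
	printf("%08x %08x %08x\n",
	       (unsigned)swab32(0x12345678),	/* 78563412 */
	       (unsigned)swahw32(0x12345678),	/* 56781234 */
	       (unsigned)swahb32(0x12345678));	/* 34127856 */
	return 0;
}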
- * - */ - -#include <linux/types.h> - -#define ___swahw32(x) \ -({ \ - __u32 __x = (x); \ - ((__u32)( \ - (((__u32)(__x) & (__u32)0x0000ffffUL) << 16) | \ - (((__u32)(__x) & (__u32)0xffff0000UL) >> 16) )); \ -}) -#define ___swahb32(x) \ -({ \ - __u32 __x = (x); \ - ((__u32)( \ - (((__u32)(__x) & (__u32)0x00ff00ffUL) << 8) | \ - (((__u32)(__x) & (__u32)0xff00ff00UL) >> 8) )); \ -}) - -#define ___constant_swahw32(x) \ - ((__u32)( \ - (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ - (((__u32)(x) & (__u32)0xffff0000UL) >> 16) )) -#define ___constant_swahb32(x) \ - ((__u32)( \ - (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ - (((__u32)(x) & (__u32)0xff00ff00UL) >> 8) )) - -/* - * provide defaults when no architecture-specific optimization is detected - */ -#ifndef __arch__swahw32 -# define __arch__swahw32(x) ___swahw32(x) -#endif -#ifndef __arch__swahb32 -# define __arch__swahb32(x) ___swahb32(x) -#endif - -#ifndef __arch__swahw32p -# define __arch__swahw32p(x) __swahw32(*(x)) -#endif -#ifndef __arch__swahb32p -# define __arch__swahb32p(x) __swahb32(*(x)) -#endif - -#ifndef __arch__swahw32s -# define __arch__swahw32s(x) do { *(x) = __swahw32p((x)); } while (0) -#endif -#ifndef __arch__swahb32s -# define __arch__swahb32s(x) do { *(x) = __swahb32p((x)); } while (0) -#endif - - -/* - * Allow constant folding - */ -#define __swahw32(x) \ -(__builtin_constant_p((__u32)(x)) ? \ - ___swahw32((x)) : \ - __fswahw32((x))) -#define __swahb32(x) \ -(__builtin_constant_p((__u32)(x)) ? \ - ___swahb32((x)) : \ - __fswahb32((x))) - - -static inline __u32 __fswahw32(__u32 x) -{ - return __arch__swahw32(x); -} - -static inline __u32 __swahw32p(__u32 *x) -{ - return __arch__swahw32p(x); -} - -static inline void __swahw32s(__u32 *addr) -{ - __arch__swahw32s(addr); -} - -static inline __u32 __fswahb32(__u32 x) -{ - return __arch__swahb32(x); -} - -static inline __u32 __swahb32p(__u32 *x) -{ - return __arch__swahb32p(x); -} - -static inline void __swahb32s(__u32 *addr) -{ - __arch__swahb32s(addr); -} - -#ifdef __BYTEORDER_HAS_U64__ -/* - * Not supported yet - */ -#endif /* __BYTEORDER_HAS_U64__ */ - -#define swahw32 __swahw32 -#define swahb32 __swahb32 -#define swahw32p __swahw32p -#define swahb32p __swahb32p -#define swahw32s __swahw32s -#define swahb32s __swahb32s - -#endif /* _LINUX_BYTEORDER_SWABB_H */ diff --git a/include/linux/can/core.h b/include/linux/can/core.h index f50785ad478..25085cbadcf 100644 --- a/include/linux/can/core.h +++ b/include/linux/can/core.h @@ -19,7 +19,7 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> -#define CAN_VERSION "20081130" +#define CAN_VERSION "20090105" /* increment this number each time you change some user-space interface */ #define CAN_ABI_VERSION "8" diff --git a/include/linux/capability.h b/include/linux/capability.h index 9d1fe30b6f6..02bdb768d43 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -53,6 +53,7 @@ typedef struct __user_cap_data_struct { #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX #define VFS_CAP_REVISION_MASK 0xFF000000 +#define VFS_CAP_REVISION_SHIFT 24 #define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK #define VFS_CAP_FLAGS_EFFECTIVE 0x000001 @@ -68,6 +69,9 @@ typedef struct __user_cap_data_struct { #define VFS_CAP_U32 VFS_CAP_U32_2 #define VFS_CAP_REVISION VFS_CAP_REVISION_2 +#ifdef CONFIG_SECURITY_FILE_CAPABILITIES +extern int file_caps_enabled; +#endif struct vfs_cap_data { __le32 magic_etc; /* Little endian */ @@ -96,6 +100,13 @@ typedef struct kernel_cap_struct { __u32 
cap[_KERNEL_CAPABILITY_U32S]; } kernel_cap_t; +/* exact same as vfs_cap_data but in cpu endian and always filled completely */ +struct cpu_vfs_cap_data { + __u32 magic_etc; + kernel_cap_t permitted; + kernel_cap_t inheritable; +}; + #define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) @@ -454,6 +465,13 @@ static inline int cap_isclear(const kernel_cap_t a) return 1; } +/* + * Check if "a" is a subset of "set". + * return 1 if ALL of the capabilities in "a" are also in "set" + * cap_issubset(0101, 1111) will return 1 + * return 0 if ANY of the capabilities in "a" are not in "set" + * cap_issubset(1111, 0101) will return 0 + */ static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) { kernel_cap_t dest; @@ -501,8 +519,6 @@ extern const kernel_cap_t __cap_empty_set; extern const kernel_cap_t __cap_full_set; extern const kernel_cap_t __cap_init_eff_set; -kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); - /** * has_capability - Determine if a task has a superior capability available * @t: The task in question @@ -513,10 +529,28 @@ kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); * * Note that this does not set PF_SUPERPRIV on the task. */ -#define has_capability(t, cap) (security_capable((t), (cap)) == 0) +#define has_capability(t, cap) (security_real_capable((t), (cap)) == 0) + +/** + * has_capability_noaudit - Determine if a task has a superior capability available (unaudited) + * @t: The task in question + * @cap: The capability to be tested for + * + * Return true if the specified task has the given superior capability + * currently in effect, false if not, but don't write an audit message for the + * check. + * + * Note that this does not set PF_SUPERPRIV on the task. + */ +#define has_capability_noaudit(t, cap) \ + (security_real_capable_noaudit((t), (cap)) == 0) extern int capable(int cap); +/* audit system wants to get cap info from files as well */ +struct dentry; +extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); + #endif /* __KERNEL__ */ #endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1164963c3a8..e267e62827b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -52,9 +52,9 @@ struct cgroup_subsys_state { * hierarchy structure */ struct cgroup *cgroup; - /* State maintained by the cgroup system to allow - * subsystems to be "busy". Should be accessed via css_get() - * and css_put() */ + /* State maintained by the cgroup system to allow subsystems + * to be "busy". Should be accessed via css_get(), + * css_tryget() and css_put().
*/ atomic_t refcnt; @@ -64,11 +64,14 @@ struct cgroup_subsys_state { /* bits in struct cgroup_subsys_state flags field */ enum { CSS_ROOT, /* This CSS is the root of the subsystem */ + CSS_REMOVED, /* This CSS is dead */ }; /* - * Call css_get() to hold a reference on the cgroup; - * + * Call css_get() to hold a reference on the css; it can be used + * for a reference obtained via: + * - an existing ref-counted reference to the css + * - task->cgroups for a locked task */ static inline void css_get(struct cgroup_subsys_state *css) @@ -77,9 +80,32 @@ static inline void css_get(struct cgroup_subsys_state *css) if (!test_bit(CSS_ROOT, &css->flags)) atomic_inc(&css->refcnt); } + +static inline bool css_is_removed(struct cgroup_subsys_state *css) +{ + return test_bit(CSS_REMOVED, &css->flags); +} + +/* + * Call css_tryget() to take a reference on a css if your existing + * (known-valid) reference isn't already ref-counted. Returns false if + * the css has been destroyed. + */ + +static inline bool css_tryget(struct cgroup_subsys_state *css) +{ + if (test_bit(CSS_ROOT, &css->flags)) + return true; + while (!atomic_inc_not_zero(&css->refcnt)) { + if (test_bit(CSS_REMOVED, &css->flags)) + return false; + } + return true; +} + /* * css_put() should be called to release a reference taken by - * css_get() + * css_get() or css_tryget() */ extern void __css_put(struct cgroup_subsys_state *css); @@ -116,7 +142,7 @@ struct cgroup { struct list_head children; /* my children */ struct cgroup *parent; /* my parent */ - struct dentry *dentry; /* cgroup fs entry */ + struct dentry *dentry; /* cgroup fs entry, RCU protected */ /* Private pointers for each registered subsystem */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; @@ -145,6 +171,9 @@ struct cgroup { int pids_use_count; /* Length of the current tasks_pids array */ int pids_length; + + /* For RCU-protected deletion */ + struct rcu_head rcu_head; }; /* A css_set is a structure holding pointers to a set of @@ -329,13 +358,7 @@ struct cgroup_subsys { struct cgroup *cgrp); void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*bind)(struct cgroup_subsys *ss, struct cgroup *root); - /* - * This routine is called with the task_lock of mm->owner held - */ - void (*mm_owner_changed)(struct cgroup_subsys *ss, - struct cgroup *old, - struct cgroup *new, - struct task_struct *p); + int subsys_id; int active; int disabled; @@ -343,9 +366,23 @@ struct cgroup_subsys { #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; - /* Protected by RCU */ - struct cgroupfs_root *root; + /* + * Protects sibling/children links of cgroups in this + * hierarchy, plus protects which hierarchy (or none) the + * subsystem is a part of (i.e. root/sibling). To avoid + * potential deadlocks, the following operations should not be + * undertaken while holding any hierarchy_mutex: + * + * - allocating memory + * - initiating hotplug events + */ + struct mutex hierarchy_mutex; + /* + * Link to parent, and list entry in parent's children. 
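A minimal sketch of how the new css_tryget()/css_put() pair introduced above is meant to be used: upgrade a known-valid but uncounted reference into a counted one, backing off if the css is already being removed. The function name and error value are illustrative, not part of this patch:

static int example_pin_css(struct cgroup_subsys_state *css)
{
	if (!css_tryget(css))		/* cgroup is already being destroyed */
		return -ENOENT;

	/* ... safely use css-owned state here ... */

	css_put(css);			/* drop the reference taken above */
	return 0;
}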
+ * Protected by this->hierarchy_mutex and cgroup_lock() + */ + struct cgroupfs_root *root; struct list_head sibling; }; @@ -400,9 +437,6 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); int cgroup_attach_task(struct cgroup *, struct task_struct *); -void cgroup_mm_owner_callbacks(struct task_struct *old, - struct task_struct *new); - #else /* !CONFIG_CGROUPS */ static inline int cgroup_init_early(void) { return 0; } @@ -420,9 +454,6 @@ static inline int cgroupstats_build(struct cgroupstats *stats, return -EINVAL; } -static inline void cgroup_mm_owner_callbacks(struct task_struct *old, - struct task_struct *new) {} - #endif /* !CONFIG_CGROUPS */ #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 9c22396e8b5..9c8d31bacf4 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -54,3 +54,9 @@ SUBSYS(freezer) #endif /* */ + +#ifdef CONFIG_NET_CLS_CGROUP +SUBSYS(net_cls) +#endif + +/* */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index ed3a5d473e5..cea153697ec 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -82,13 +82,13 @@ struct clock_event_device { int shift; int rating; int irq; - cpumask_t cpumask; + const struct cpumask *cpumask; int (*set_next_event)(unsigned long evt, struct clock_event_device *); void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *); void (*event_handler)(struct clock_event_device *); - void (*broadcast)(cpumask_t mask); + void (*broadcast)(const struct cpumask *mask); struct list_head list; enum clock_event_mode mode; ktime_t next_event; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5c8351b859f..af40f8eb86f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -61,3 +61,8 @@ #define noinline __attribute__((noinline)) #define __attribute_const__ __attribute__((__const__)) #define __maybe_unused __attribute__((unused)) + +#define __gcc_header(x) #x +#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) +#define gcc_header(x) _gcc_header(x) +#include gcc_header(__GNUC__) diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index e5eb795f78a..8005effc04f 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -2,8 +2,9 @@ #error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." #endif -/* These definitions are for GCC v3.x. */ -#include <linux/compiler-gcc.h> +#if __GNUC_MINOR__ < 2 +# error Sorry, your compiler is too old - please upgrade it. +#endif #if __GNUC_MINOR__ >= 3 # define __used __attribute__((__used__)) diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 974f5b7bb20..09992718f9e 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -2,8 +2,10 @@ #error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." #endif -/* These definitions are for GCC v4.x. 
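For clarity, the gcc_header() helpers added to compiler-gcc.h above select the per-major-version header by token pasting and stringification; a worked expansion for a GCC 4 build (illustration only, not additional patch content):

/*
 * gcc_header(__GNUC__)                       with __GNUC__ == 4
 *   -> _gcc_header(4)
 *   -> __gcc_header(linux/compiler-gcc4.h)   (token pasting: gcc##4 -> gcc4)
 *   -> "linux/compiler-gcc4.h"               (stringification)
 *
 * so the new include line
 *   #include gcc_header(__GNUC__)
 * reads as
 *   #include "linux/compiler-gcc4.h"
 */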
*/ -#include <linux/compiler-gcc.h> +/* GCC 4.1.[01] miscompiles __weak */ +#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 +# error Your version of gcc miscompiles the __weak directive +#endif #define __used __attribute__((__used__)) #define __must_check __attribute__((warn_unused_result)) @@ -16,7 +18,7 @@ */ #define uninitialized_var(x) x = x -#if !(__GNUC__ == 4 && __GNUC_MINOR__ < 3) +#if __GNUC_MINOR__ >= 3 /* Mark functions as cold. gcc will assume any path leading to a call to them will be unlikely. This means a lot of manual unlikely()s are unnecessary now for any paths leading to the usual suspects diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 98115d9d04d..d95da1020f1 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -36,12 +36,8 @@ extern void __chk_io_ptr(const volatile void __iomem *); #ifdef __KERNEL__ -#if __GNUC__ >= 4 -# include <linux/compiler-gcc4.h> -#elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2 -# include <linux/compiler-gcc3.h> -#else -# error Sorry, your compiler is too old/not recognized. +#ifdef __GNUC__ +#include <linux/compiler-gcc.h> #endif #define notrace __attribute__((no_instrument_function)) @@ -59,8 +55,88 @@ extern void __chk_io_ptr(const volatile void __iomem *); * specific implementations come from the above header files */ -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + }; +}; + +/* + * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code + * to disable branch tracing on a per file basis. + */ +#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING) +void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + +#define likely_notrace(x) __builtin_expect(!!(x), 1) +#define unlikely_notrace(x) __builtin_expect(!!(x), 0) + +#define __branch_check__(x, expect) ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_annotated_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = likely_notrace(x); \ + ftrace_likely_update(&______f, ______r, expect); \ + ______r; \ + }) + +/* + * Using __builtin_constant_p(x) to ignore cases where the return + * value is always the same. This idea is taken from a similar patch + * written by Daniel Walker. + */ +# ifndef likely +# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# endif +# ifndef unlikely +# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +/* + * "Define 'is'", Bill Clinton + * "Define 'if'", Steven Rostedt + */ +#define if(cond) if (__builtin_constant_p((cond)) ? 
!!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = !!(cond); \ + if (______r) \ + ______f.hit++; \ + else \ + ______f.miss++; \ + ______r; \ + })) +#endif /* CONFIG_PROFILE_ALL_BRANCHES */ + +#else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif /* Optimization barrier */ #ifndef barrier diff --git a/include/linux/console.h b/include/linux/console.h index 248e6e3b9b7..a67a90cf826 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty); #define VESA_HSYNC_SUSPEND 2 #define VESA_POWERDOWN 3 +#ifdef CONFIG_VGA_CONSOLE +extern bool vgacon_text_force(void); +#endif + #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1ee608fd7b7..484b3abf61b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -234,6 +234,7 @@ struct cpufreq_driver { int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; + bool hide_interface; }; /* flags */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21e1dd43e52..9f315382610 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -144,6 +144,7 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; extern cpumask_t _unused_cpumask_arg_; +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) { @@ -267,6 +268,26 @@ static inline void __cpus_shift_left(cpumask_t *dstp, { bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ + +/** + * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * + * @bitmap: the bitmap + * + * There are a few places where cpumask_var_t isn't appropriate and + * static cpumasks must be used (eg. very early boot), yet we don't + * expose the definition of 'struct cpumask'. + * + * This does the conversion, and can be used as a constant initializer. + */ +#define to_cpumask(bitmap) \ + ((struct cpumask *)(1 ? (bitmap) \ + : (void *)sizeof(__check_is_bitmap(bitmap)))) + +static inline int __check_is_bitmap(const unsigned long *bitmap) +{ + return 1; +} /* * Special-case data structure for "single bit set only" constant CPU masks. 
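A short sketch of the to_cpumask() conversion documented above, for the early-boot style case it mentions where a static NR_CPUS bitmap must be used; the bitmap and function names are made up for illustration:

static DECLARE_BITMAP(example_boot_cpu_bits, NR_CPUS);
#define example_boot_cpus to_cpumask(example_boot_cpu_bits)

static void example_mark_boot_cpu(unsigned int cpu)
{
	/* the converted pointer works with the regular struct cpumask API */
	cpumask_set_cpu(cpu, example_boot_cpus);
}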
@@ -278,13 +299,14 @@ static inline void __cpus_shift_left(cpumask_t *dstp, extern const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; -static inline const cpumask_t *get_cpu_mask(unsigned int cpu) +static inline const struct cpumask *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; p -= cpu / BITS_PER_LONG; - return (const cpumask_t *)p; + return to_cpumask(p); } +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS /* * In cases where we take the address of the cpumask immediately, * gcc optimizes it out (it's a constant) and there's no huge stack @@ -339,36 +361,6 @@ extern cpumask_t cpu_mask_all; #endif #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) -#define cpumask_scnprintf(buf, len, src) \ - __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpumask_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnprintf(buf, len, srcp->bits, nbits); -} - -#define cpumask_parse_user(ubuf, ulen, dst) \ - __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS) -static inline int __cpumask_parse_user(const char __user *buf, int len, - cpumask_t *dstp, int nbits) -{ - return bitmap_parse_user(buf, len, dstp->bits, nbits); -} - -#define cpulist_scnprintf(buf, len, src) \ - __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) -static inline int __cpulist_scnprintf(char *buf, int len, - const cpumask_t *srcp, int nbits) -{ - return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); -} - -#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) -static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) -{ - return bitmap_parselist(buf, dstp->bits, nbits); -} - #define cpu_remap(oldbit, old, new) \ __cpu_remap((oldbit), &(old), &(new), NR_CPUS) static inline int __cpu_remap(int oldbit, @@ -400,19 +392,22 @@ static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #if NR_CPUS == 1 #define nr_cpu_ids 1 +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #define first_cpu(src) ({ (void)(src); 0; }) #define next_cpu(n, src) ({ (void)(src); 1; }) #define any_online_cpu(mask) 0 #define for_each_cpu_mask(cpu, mask) \ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) - +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #else /* NR_CPUS > 1 */ extern int nr_cpu_ids; +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS int __first_cpu(const cpumask_t *srcp); int __next_cpu(int n, const cpumask_t *srcp); int __any_online_cpu(const cpumask_t *mask); @@ -424,8 +419,10 @@ int __any_online_cpu(const cpumask_t *mask); for ((cpu) = -1; \ (cpu) = next_cpu((cpu), (mask)), \ (cpu) < NR_CPUS; ) +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ #endif +#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS #if NR_CPUS <= 64 #define next_cpu_nr(n, src) next_cpu(n, src) @@ -443,77 +440,67 @@ int __next_cpu_nr(int n, const cpumask_t *srcp); (cpu) < nr_cpu_ids; ) #endif /* NR_CPUS > 64 */ +#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ /* * The following particular system cpumasks and operations manage - * possible, present, active and online cpus. Each of them is a fixed size - * bitmap of size NR_CPUS. + * possible, present, active and online cpus. 
* - * #ifdef CONFIG_HOTPLUG_CPU - * cpu_possible_map - has bit 'cpu' set iff cpu is populatable - * cpu_present_map - has bit 'cpu' set iff cpu is populated - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler - * cpu_active_map - has bit 'cpu' set iff cpu available to migration - * #else - * cpu_possible_map - has bit 'cpu' set iff cpu is populated - * cpu_present_map - copy of cpu_possible_map - * cpu_online_map - has bit 'cpu' set iff cpu available to scheduler - * #endif + * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable + * cpu_present_mask - has bit 'cpu' set iff cpu is populated + * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler + * cpu_active_mask - has bit 'cpu' set iff cpu available to migration * - * In either case, NR_CPUS is fixed at compile time, as the static - * size of these bitmaps. The cpu_possible_map is fixed at boot - * time, as the set of CPU id's that it is possible might ever - * be plugged in at anytime during the life of that system boot. - * The cpu_present_map is dynamic(*), representing which CPUs - * are currently plugged in. And cpu_online_map is the dynamic - * subset of cpu_present_map, indicating those CPUs available - * for scheduling. + * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. * - * If HOTPLUG is enabled, then cpu_possible_map is forced to have + * The cpu_possible_mask is fixed at boot time, as the set of CPU id's + * that it is possible might ever be plugged in at anytime during the + * life of that system boot. The cpu_present_mask is dynamic(*), + * representing which CPUs are currently plugged in. And + * cpu_online_mask is the dynamic subset of cpu_present_mask, + * indicating those CPUs available for scheduling. + * + * If HOTPLUG is enabled, then cpu_possible_mask is forced to have * all NR_CPUS bits set, otherwise it is just the set of CPUs that * ACPI reports present at boot. * - * If HOTPLUG is enabled, then cpu_present_map varies dynamically, + * If HOTPLUG is enabled, then cpu_present_mask varies dynamically, * depending on what ACPI reports as currently plugged in, otherwise - * cpu_present_map is just a copy of cpu_possible_map. + * cpu_present_mask is just a copy of cpu_possible_mask. * - * (*) Well, cpu_present_map is dynamic in the hotplug case. If not - * hotplug, it's a copy of cpu_possible_map, hence fixed at boot. + * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not + * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. * * Subtleties: * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode * assumption that their single CPU is online. The UP - * cpu_{online,possible,present}_maps are placebos. Changing them + * cpu_{online,possible,present}_masks are placebos. Changing them * will have no useful affect on the following num_*_cpus() * and cpu_*() macros in the UP case. This ugliness is a UP * optimization - don't waste any instructions or memory references * asking if you're online or how many CPUs there are if there is * only one CPU. - * 2) Most SMP arch's #define some of these maps to be some - * other map specific to that arch. Therefore, the following - * must be #define macros, not inlines. To see why, examine - * the assembly code produced by the following. 
Note that - * set1() writes phys_x_map, but set2() writes x_map: - * int x_map, phys_x_map; - * #define set1(a) x_map = a - * inline void set2(int a) { x_map = a; } - * #define x_map phys_x_map - * main(){ set1(3); set2(5); } */ -extern cpumask_t cpu_possible_map; -extern cpumask_t cpu_online_map; -extern cpumask_t cpu_present_map; -extern cpumask_t cpu_active_map; +extern const struct cpumask *const cpu_possible_mask; +extern const struct cpumask *const cpu_online_mask; +extern const struct cpumask *const cpu_present_mask; +extern const struct cpumask *const cpu_active_mask; + +/* These strip const, as traditionally they weren't const. */ +#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask) +#define cpu_online_map (*(cpumask_t *)cpu_online_mask) +#define cpu_present_map (*(cpumask_t *)cpu_present_mask) +#define cpu_active_map (*(cpumask_t *)cpu_active_mask) #if NR_CPUS > 1 -#define num_online_cpus() cpus_weight_nr(cpu_online_map) -#define num_possible_cpus() cpus_weight_nr(cpu_possible_map) -#define num_present_cpus() cpus_weight_nr(cpu_present_map) -#define cpu_online(cpu) cpu_isset((cpu), cpu_online_map) -#define cpu_possible(cpu) cpu_isset((cpu), cpu_possible_map) -#define cpu_present(cpu) cpu_isset((cpu), cpu_present_map) -#define cpu_active(cpu) cpu_isset((cpu), cpu_active_map) +#define num_online_cpus() cpumask_weight(cpu_online_mask) +#define num_possible_cpus() cpumask_weight(cpu_possible_mask) +#define num_present_cpus() cpumask_weight(cpu_present_mask) +#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) +#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) +#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) +#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) #else #define num_online_cpus() 1 #define num_possible_cpus() 1 @@ -526,10 +513,6 @@ extern cpumask_t cpu_active_map; #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) -#define for_each_possible_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_possible_map) -#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) -#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) - /* These are the new versions of the cpumask operators: passed by pointer. * The older versions will be implemented in terms of these, then deleted. */ #define cpumask_bits(maskp) ((maskp)->bits) @@ -540,9 +523,6 @@ extern cpumask_t cpu_active_map; [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ } -/* This produces more efficient code. */ -#define nr_cpumask_bits NR_CPUS - #else /* NR_CPUS > BITS_PER_LONG */ #define CPU_BITS_ALL \ @@ -550,9 +530,15 @@ extern cpumask_t cpu_active_map; [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ } +#endif /* NR_CPUS > BITS_PER_LONG */ +#ifdef CONFIG_CPUMASK_OFFSTACK +/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, + * not all bits may be allocated. */ #define nr_cpumask_bits nr_cpu_ids -#endif /* NR_CPUS > BITS_PER_LONG */ +#else +#define nr_cpumask_bits NR_CPUS +#endif /* verify cpu argument to cpumask_* operators */ static inline unsigned int cpumask_check(unsigned int cpu) @@ -714,7 +700,7 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * No static inline type checking - see Subtlety (1) above. 
*/ #define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), (cpumask)->bits) + test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -946,22 +932,61 @@ static inline void cpumask_copy(struct cpumask *dstp, #define cpumask_of(cpu) (get_cpu_mask(cpu)) /** - * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * - * @bitmap: the bitmap + * cpumask_scnprintf - print a cpumask into a string as comma-separated hex + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print * - * There are a few places where cpumask_var_t isn't appropriate and - * static cpumasks must be used (eg. very early boot), yet we don't - * expose the definition of 'struct cpumask'. + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpumask_scnprintf(char *buf, int len, + const struct cpumask *srcp) +{ + return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); +} + +/** + * cpumask_parse_user - extract a cpumask from a user string + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. * - * This does the conversion, and can be used as a constant initializer. + * Returns -errno, or 0 for success. */ -#define to_cpumask(bitmap) \ - ((struct cpumask *)(1 ? (bitmap) \ - : (void *)sizeof(__check_is_bitmap(bitmap)))) +static inline int cpumask_parse_user(const char __user *buf, int len, + struct cpumask *dstp) +{ + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} -static inline int __check_is_bitmap(const unsigned long *bitmap) +/** + * cpulist_scnprintf - print a cpumask into a string as comma-separated list + * @buf: the buffer to sprintf into + * @len: the length of the buffer + * @srcp: the cpumask to print + * + * If len is zero, returns zero. Otherwise returns the length of the + * (nul-terminated) @buf string. + */ +static inline int cpulist_scnprintf(char *buf, int len, + const struct cpumask *srcp) { - return 1; + return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), + nr_cpumask_bits); +} + +/** + * cpulist_parse_user - extract a cpumask from a user string of ranges + * @buf: the buffer to extract from + * @len: the length of the buffer + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpulist_parse(const char *buf, struct cpumask *dstp) +{ + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); } /** @@ -995,6 +1020,7 @@ static inline size_t cpumask_size(void) #ifdef CONFIG_CPUMASK_OFFSTACK typedef struct cpumask *cpumask_var_t; +bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); @@ -1008,6 +1034,12 @@ static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) return true; } +static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + return true; +} + static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } @@ -1021,12 +1053,6 @@ static inline void free_bootmem_cpumask_var(cpumask_var_t mask) } #endif /* CONFIG_CPUMASK_OFFSTACK */ -/* The pointer versions of the maps, these will become the primary versions. 
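A sketch of the two formatting styles provided by the cpumask_scnprintf()/cpulist_scnprintf() helpers added above; buffer sizes and the example output in the comments are arbitrary:

static void example_show_mask(const struct cpumask *mask)
{
	char hex[64], list[64];

	cpumask_scnprintf(hex, sizeof(hex), mask);	/* e.g. "0000000f" */
	cpulist_scnprintf(list, sizeof(list), mask);	/* e.g. "0-3"      */
	printk(KERN_DEBUG "cpus: %s (%s)\n", hex, list);
}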
*/ -#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map) -#define cpu_online_mask ((const struct cpumask *)&cpu_online_map) -#define cpu_present_mask ((const struct cpumask *)&cpu_present_map) -#define cpu_active_mask ((const struct cpumask *)&cpu_active_map) - /* It's common to want to use cpu_all_mask in struct member initializers, * so it has to refer to an address rather than a pointer. */ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); @@ -1035,51 +1061,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); /* First bits of cpu_bit_bitmap are in fact unset. */ #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) -/* Wrappers for arch boot code to manipulate normally-constant masks */ -static inline void set_cpu_possible(unsigned int cpu, bool possible) -{ - if (possible) - cpumask_set_cpu(cpu, &cpu_possible_map); - else - cpumask_clear_cpu(cpu, &cpu_possible_map); -} - -static inline void set_cpu_present(unsigned int cpu, bool present) -{ - if (present) - cpumask_set_cpu(cpu, &cpu_present_map); - else - cpumask_clear_cpu(cpu, &cpu_present_map); -} - -static inline void set_cpu_online(unsigned int cpu, bool online) -{ - if (online) - cpumask_set_cpu(cpu, &cpu_online_map); - else - cpumask_clear_cpu(cpu, &cpu_online_map); -} - -static inline void set_cpu_active(unsigned int cpu, bool active) -{ - if (active) - cpumask_set_cpu(cpu, &cpu_active_map); - else - cpumask_clear_cpu(cpu, &cpu_active_map); -} - -static inline void init_cpu_present(const struct cpumask *src) -{ - cpumask_copy(&cpu_present_map, src); -} - -static inline void init_cpu_possible(const struct cpumask *src) -{ - cpumask_copy(&cpu_possible_map, src); -} +#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) +#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) +#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) -static inline void init_cpu_online(const struct cpumask *src) -{ - cpumask_copy(&cpu_online_map, src); -} +/* Wrappers for arch boot code to manipulate normally-constant masks */ +void set_cpu_possible(unsigned int cpu, bool possible); +void set_cpu_present(unsigned int cpu, bool present); +void set_cpu_online(unsigned int cpu, bool online); +void set_cpu_active(unsigned int cpu, bool active); +void init_cpu_present(const struct cpumask *src); +void init_cpu_possible(const struct cpumask *src); +void init_cpu_online(const struct cpumask *src); #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 8e540d32c9f..90c6074a36c 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -20,8 +20,9 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? 
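A minimal sketch of the cpumask_var_t pattern declared above: with CONFIG_CPUMASK_OFFSTACK=y the allocation is real, otherwise the calls collapse to the no-op inlines. The function name is illustrative:

static int example_with_scratch_mask(void)
{
	cpumask_var_t scratch;

	if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
		return -ENOMEM;

	/* work on a private copy instead of touching the global mask */
	cpumask_copy(scratch, cpu_online_mask);
	/* ... manipulate and use scratch ... */

	free_cpumask_var(scratch);
	return 0;
}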
*/ extern int cpuset_init_early(void); extern int cpuset_init(void); extern void cpuset_init_smp(void); -extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask); -extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask); +extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); +extern void cpuset_cpus_allowed_locked(struct task_struct *p, + struct cpumask *mask); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -78,18 +79,21 @@ extern int current_cpuset_is_being_rebound(void); extern void rebuild_sched_domains(void); +extern void cpuset_print_task_mems_allowed(struct task_struct *p); + #else /* !CONFIG_CPUSETS */ static inline int cpuset_init_early(void) { return 0; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} -static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask) +static inline void cpuset_cpus_allowed(struct task_struct *p, + struct cpumask *mask) { *mask = cpu_possible_map; } static inline void cpuset_cpus_allowed_locked(struct task_struct *p, - cpumask_t *mask) + struct cpumask *mask) { *mask = cpu_possible_map; } @@ -159,6 +163,10 @@ static inline void rebuild_sched_domains(void) partition_sched_domains(1, NULL, NULL); } +static inline void cpuset_print_task_mems_allowed(struct task_struct *p) +{ +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h index 508f512e5a2..bd8b44d96bd 100644 --- a/include/linux/crc32c.h +++ b/include/linux/crc32c.h @@ -3,9 +3,9 @@ #include <linux/types.h> -extern u32 crc32c_le(u32 crc, unsigned char const *address, size_t length); -extern u32 crc32c_be(u32 crc, unsigned char const *address, size_t length); +extern u32 crc32c(u32 crc, const void *address, unsigned int length); -#define crc32c(seed, data, length) crc32c_le(seed, (unsigned char const *)data, length) +/* This macro exists for backwards-compatibility. */ +#define crc32c_le crc32c #endif /* _LINUX_CRC32C_H */ diff --git a/include/linux/cred.h b/include/linux/cred.h index b69222cc1fd..3282ee4318e 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -1,4 +1,4 @@ -/* Credentials management +/* Credentials management - see Documentation/credentials.txt * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) @@ -12,39 +12,335 @@ #ifndef _LINUX_CRED_H #define _LINUX_CRED_H -#define get_current_user() (get_uid(current->user)) +#include <linux/capability.h> +#include <linux/key.h> +#include <asm/atomic.h> -#define task_uid(task) ((task)->uid) -#define task_gid(task) ((task)->gid) -#define task_euid(task) ((task)->euid) -#define task_egid(task) ((task)->egid) +struct user_struct; +struct cred; +struct inode; -#define current_uid() (current->uid) -#define current_gid() (current->gid) -#define current_euid() (current->euid) -#define current_egid() (current->egid) -#define current_suid() (current->suid) -#define current_sgid() (current->sgid) -#define current_fsuid() (current->fsuid) -#define current_fsgid() (current->fsgid) -#define current_cap() (current->cap_effective) +/* + * COW Supplementary groups list + */ +#define NGROUPS_SMALL 32 +#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) + +struct group_info { + atomic_t usage; + int ngroups; + int nblocks; + gid_t small_block[NGROUPS_SMALL]; + gid_t *blocks[0]; +}; + +/** + * get_group_info - Get a reference to a group info structure + * @group_info: The group info to reference + * + * This gets a reference to a set of supplementary groups. + * + * If the caller is accessing a task's credentials, they must hold the RCU read + * lock when reading. + */ +static inline struct group_info *get_group_info(struct group_info *gi) +{ + atomic_inc(&gi->usage); + return gi; +} + +/** + * put_group_info - Release a reference to a group info structure + * @group_info: The group info to release + */ +#define put_group_info(group_info) \ +do { \ + if (atomic_dec_and_test(&(group_info)->usage)) \ + groups_free(group_info); \ +} while (0) + +extern struct group_info *groups_alloc(int); +extern struct group_info init_groups; +extern void groups_free(struct group_info *); +extern int set_current_groups(struct group_info *); +extern int set_groups(struct cred *, struct group_info *); +extern int groups_search(const struct group_info *, gid_t); + +/* access the groups "array" with this macro */ +#define GROUP_AT(gi, i) \ + ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK]) + +extern int in_group_p(gid_t); +extern int in_egroup_p(gid_t); + +/* + * The common credentials for a thread group + * - shared by CLONE_THREAD + */ +#ifdef CONFIG_KEYS +struct thread_group_cred { + atomic_t usage; + pid_t tgid; /* thread group process ID */ + spinlock_t lock; + struct key *session_keyring; /* keyring inherited over fork */ + struct key *process_keyring; /* keyring private to this process */ + struct rcu_head rcu; /* RCU deletion hook */ +}; +#endif + +/* + * The security context of a task + * + * The parts of the context break down into two categories: + * + * (1) The objective context of a task. These parts are used when some other + * task is attempting to affect this one. + * + * (2) The subjective context. These details are used when the task is acting + * upon another object, be that a file, a task, a key or whatever. + * + * Note that some members of this structure belong to both categories - the + * LSM security pointer for instance. + * + * A task has two security pointers. task->real_cred points to the objective + * context that defines that task's actual details. The objective part of this + * context is used whenever that task is acted upon. + * + * task->cred points to the subjective context that defines the details of how + * that task is going to act upon another object. 
This may be overridden + * temporarily to point to another security context, but normally points to the + * same context as task->real_cred. + */ +struct cred { + atomic_t usage; + uid_t uid; /* real UID of the task */ + gid_t gid; /* real GID of the task */ + uid_t suid; /* saved UID of the task */ + gid_t sgid; /* saved GID of the task */ + uid_t euid; /* effective UID of the task */ + gid_t egid; /* effective GID of the task */ + uid_t fsuid; /* UID for VFS ops */ + gid_t fsgid; /* GID for VFS ops */ + unsigned securebits; /* SUID-less security management */ + kernel_cap_t cap_inheritable; /* caps our children can inherit */ + kernel_cap_t cap_permitted; /* caps we're permitted */ + kernel_cap_t cap_effective; /* caps we can actually use */ + kernel_cap_t cap_bset; /* capability bounding set */ +#ifdef CONFIG_KEYS + unsigned char jit_keyring; /* default keyring to attach requested + * keys to */ + struct key *thread_keyring; /* keyring private to this thread */ + struct key *request_key_auth; /* assumed request_key authority */ + struct thread_group_cred *tgcred; /* thread-group shared credentials */ +#endif +#ifdef CONFIG_SECURITY + void *security; /* subjective LSM security */ +#endif + struct user_struct *user; /* real user ID subscription */ + struct group_info *group_info; /* supplementary groups for euid/fsgid */ + struct rcu_head rcu; /* RCU deletion hook */ +}; + +extern void __put_cred(struct cred *); +extern int copy_creds(struct task_struct *, unsigned long); +extern struct cred *prepare_creds(void); +extern struct cred *prepare_exec_creds(void); +extern struct cred *prepare_usermodehelper_creds(void); +extern int commit_creds(struct cred *); +extern void abort_creds(struct cred *); +extern const struct cred *override_creds(const struct cred *); +extern void revert_creds(const struct cred *); +extern struct cred *prepare_kernel_cred(struct task_struct *); +extern int change_create_files_as(struct cred *, struct inode *); +extern int set_security_override(struct cred *, u32); +extern int set_security_override_from_ctx(struct cred *, const char *); +extern int set_create_files_as(struct cred *, struct inode *); +extern void __init cred_init(void); + +/** + * get_new_cred - Get a reference on a new set of credentials + * @cred: The new credentials to reference + * + * Get a reference on the specified set of new credentials. The caller must + * release the reference. + */ +static inline struct cred *get_new_cred(struct cred *cred) +{ + atomic_inc(&cred->usage); + return cred; +} + +/** + * get_cred - Get a reference on a set of credentials + * @cred: The credentials to reference + * + * Get a reference on the specified set of credentials. The caller must + * release the reference. + * + * This is used to deal with a committed set of credentials. Although the + * pointer is const, this will temporarily discard the const and increment the + * usage count. The purpose of this is to attempt to catch at compile time the + * accidental alteration of a set of credentials that should be considered + * immutable. + */ +static inline const struct cred *get_cred(const struct cred *cred) +{ + return get_new_cred((struct cred *) cred); +} + +/** + * put_cred - Release a reference to a set of credentials + * @cred: The credentials to release + * + * Release a reference to a set of credentials, deleting them when the last ref + * is released. 
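A short sketch of the override_creds()/revert_creds() pairing declared above, used to temporarily act under a different prepared credential set; the function name is illustrative:

static void example_act_as(const struct cred *new_cred)
{
	const struct cred *old;

	old = override_creds(new_cred);	/* returns the creds to restore */
	/* ... work done here uses new_cred as the subjective context ... */
	revert_creds(old);
}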
+ * + * This takes a const pointer to a set of credentials because the credentials + * on task_struct are attached by const pointers to prevent accidental + * alteration of otherwise immutable credential sets. + */ +static inline void put_cred(const struct cred *_cred) +{ + struct cred *cred = (struct cred *) _cred; + + BUG_ON(atomic_read(&(cred)->usage) <= 0); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); +} + +/** + * current_cred - Access the current task's subjective credentials + * + * Access the subjective credentials of the current task. + */ +#define current_cred() \ + (current->cred) + +/** + * __task_cred - Access a task's objective credentials + * @task: The task to query + * + * Access the objective credentials of a task. The caller must hold the RCU + * readlock. + * + * The caller must make sure task doesn't go away, either by holding a ref on + * task or by holding tasklist_lock to prevent it from being unlinked. + */ +#define __task_cred(task) \ + ((const struct cred *)(rcu_dereference((task)->real_cred))) + +/** + * get_task_cred - Get another task's objective credentials + * @task: The task to query + * + * Get the objective credentials of a task, pinning them so that they can't go + * away. Accessing a task's credentials directly is not permitted. + * + * The caller must make sure task doesn't go away, either by holding a ref on + * task or by holding tasklist_lock to prevent it from being unlinked. + */ +#define get_task_cred(task) \ +({ \ + struct cred *__cred; \ + rcu_read_lock(); \ + __cred = (struct cred *) __task_cred((task)); \ + get_cred(__cred); \ + rcu_read_unlock(); \ + __cred; \ +}) + +/** + * get_current_cred - Get the current task's subjective credentials + * + * Get the subjective credentials of the current task, pinning them so that + * they can't go away. Accessing the current task's credentials directly is + * not permitted. + */ +#define get_current_cred() \ + (get_cred(current_cred())) + +/** + * get_current_user - Get the current task's user_struct + * + * Get the user record of the current task, pinning it so that it can't go + * away. + */ +#define get_current_user() \ +({ \ + struct user_struct *__u; \ + struct cred *__cred; \ + __cred = (struct cred *) current_cred(); \ + __u = get_uid(__cred->user); \ + __u; \ +}) + +/** + * get_current_groups - Get the current task's supplementary group list + * + * Get the supplementary group list of the current task, pinning it so that it + * can't go away. 
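A sketch of reading another task's credentials with the accessors above; this is essentially what the task_uid()/task_cred_xxx() macros later in this header expand to (the wrapper name is illustrative):

static uid_t example_task_uid(struct task_struct *task)
{
	uid_t uid;

	rcu_read_lock();		/* __task_cred() requires the RCU read lock */
	uid = __task_cred(task)->uid;
	rcu_read_unlock();

	return uid;
}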
+ */ +#define get_current_groups() \ +({ \ + struct group_info *__groups; \ + struct cred *__cred; \ + __cred = (struct cred *) current_cred(); \ + __groups = get_group_info(__cred->group_info); \ + __groups; \ +}) + +#define task_cred_xxx(task, xxx) \ +({ \ + __typeof__(((struct cred *)NULL)->xxx) ___val; \ + rcu_read_lock(); \ + ___val = __task_cred((task))->xxx; \ + rcu_read_unlock(); \ + ___val; \ +}) + +#define task_uid(task) (task_cred_xxx((task), uid)) +#define task_euid(task) (task_cred_xxx((task), euid)) + +#define current_cred_xxx(xxx) \ +({ \ + current->cred->xxx; \ +}) + +#define current_uid() (current_cred_xxx(uid)) +#define current_gid() (current_cred_xxx(gid)) +#define current_euid() (current_cred_xxx(euid)) +#define current_egid() (current_cred_xxx(egid)) +#define current_suid() (current_cred_xxx(suid)) +#define current_sgid() (current_cred_xxx(sgid)) +#define current_fsuid() (current_cred_xxx(fsuid)) +#define current_fsgid() (current_cred_xxx(fsgid)) +#define current_cap() (current_cred_xxx(cap_effective)) +#define current_user() (current_cred_xxx(user)) +#define current_user_ns() (current_cred_xxx(user)->user_ns) +#define current_security() (current_cred_xxx(security)) #define current_uid_gid(_uid, _gid) \ do { \ - *(_uid) = current->uid; \ - *(_gid) = current->gid; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_uid) = __cred->uid; \ + *(_gid) = __cred->gid; \ } while(0) -#define current_euid_egid(_uid, _gid) \ +#define current_euid_egid(_euid, _egid) \ do { \ - *(_uid) = current->euid; \ - *(_gid) = current->egid; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_euid) = __cred->euid; \ + *(_egid) = __cred->egid; \ } while(0) -#define current_fsuid_fsgid(_uid, _gid) \ +#define current_fsuid_fsgid(_fsuid, _fsgid) \ do { \ - *(_uid) = current->fsuid; \ - *(_gid) = current->fsgid; \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_fsuid) = __cred->fsuid; \ + *(_fsgid) = __cred->fsgid; \ } while(0) #endif /* _LINUX_CRED_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3d2317e4af2..3bacd71509f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -36,7 +36,8 @@ #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_DIGEST 0x00000008 -#define CRYPTO_ALG_TYPE_HASH 0x00000009 +#define CRYPTO_ALG_TYPE_HASH 0x00000008 +#define CRYPTO_ALG_TYPE_SHASH 0x00000009 #define CRYPTO_ALG_TYPE_AHASH 0x0000000a #define CRYPTO_ALG_TYPE_RNG 0x0000000c @@ -220,6 +221,7 @@ struct ablkcipher_alg { struct ahash_alg { int (*init)(struct ahash_request *req); + int (*reinit)(struct ahash_request *req); int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*digest)(struct ahash_request *req); @@ -480,6 +482,8 @@ struct crypto_tfm { struct compress_tfm compress; struct rng_tfm rng; } crt_u; + + void (*exit)(struct crypto_tfm *tfm); struct crypto_alg *__crt_alg; @@ -544,7 +548,9 @@ struct crypto_attr_u32 { * Transform user interface. 
*/ -struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); +struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, + u32 type, u32 mask); struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); void crypto_free_tfm(struct crypto_tfm *tfm); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a37359d0bad..c66d22487bf 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -75,14 +75,22 @@ full_name_hash(const unsigned char *name, unsigned int len) return end_name_hash(hash); } -struct dcookie_struct; - -#define DNAME_INLINE_LEN_MIN 36 +/* + * Try to keep struct dentry aligned on 64 byte cachelines (this will + * give reasonable cacheline footprint with larger lines without the + * large memory footprint increase). + */ +#ifdef CONFIG_64BIT +#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */ +#else +#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */ +#endif struct dentry { atomic_t d_count; unsigned int d_flags; /* protected by d_lock */ spinlock_t d_lock; /* per dentry lock */ + int d_mounted; struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ /* @@ -107,10 +115,7 @@ struct dentry { struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ void *d_fsdata; /* fs-specific data */ -#ifdef CONFIG_PROFILING - struct dcookie_struct *d_cookie; /* cookie, if any */ -#endif - int d_mounted; + unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; @@ -177,6 +182,8 @@ d_iput: no no no yes #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ +#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ + extern spinlock_t dcache_lock; extern seqlock_t rename_lock; diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h new file mode 100644 index 00000000000..b0ef274e003 --- /dev/null +++ b/include/linux/dcbnl.h @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2008, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
+ * + * Author: Lucy Liu <lucy.liu@intel.com> + */ + +#ifndef __LINUX_DCBNL_H__ +#define __LINUX_DCBNL_H__ + +#define DCB_PROTO_VERSION 1 + +struct dcbmsg { + unsigned char dcb_family; + __u8 cmd; + __u16 dcb_pad; +}; + +/** + * enum dcbnl_commands - supported DCB commands + * + * @DCB_CMD_UNDEFINED: unspecified command to catch errors + * @DCB_CMD_GSTATE: request the state of DCB in the device + * @DCB_CMD_SSTATE: set the state of DCB in the device + * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx + * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx + * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx + * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx + * @DCB_CMD_PFC_GCFG: request the priority flow control configuration + * @DCB_CMD_PFC_SCFG: set the priority flow control configuration + * @DCB_CMD_SET_ALL: apply all changes to the underlying device + * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying + * device. Only useful when using bonding. + * @DCB_CMD_GCAP: request the DCB capabilities of the device + * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported + * @DCB_CMD_SNUMTCS: set the number of traffic classes + * @DCB_CMD_GBCN: set backward congestion notification configuration + * @DCB_CMD_SBCN: get backward congestion notification configration. + */ +enum dcbnl_commands { + DCB_CMD_UNDEFINED, + + DCB_CMD_GSTATE, + DCB_CMD_SSTATE, + + DCB_CMD_PGTX_GCFG, + DCB_CMD_PGTX_SCFG, + DCB_CMD_PGRX_GCFG, + DCB_CMD_PGRX_SCFG, + + DCB_CMD_PFC_GCFG, + DCB_CMD_PFC_SCFG, + + DCB_CMD_SET_ALL, + + DCB_CMD_GPERM_HWADDR, + + DCB_CMD_GCAP, + + DCB_CMD_GNUMTCS, + DCB_CMD_SNUMTCS, + + DCB_CMD_PFC_GSTATE, + DCB_CMD_PFC_SSTATE, + + DCB_CMD_BCN_GCFG, + DCB_CMD_BCN_SCFG, + + __DCB_CMD_ENUM_MAX, + DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_attrs - DCB top-level netlink attributes + * + * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING) + * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8) + * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8) + * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED) + * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8) + * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED) + * @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8) + * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED) + * @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED) + * @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED) + * @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED) + */ +enum dcbnl_attrs { + DCB_ATTR_UNDEFINED, + + DCB_ATTR_IFNAME, + DCB_ATTR_STATE, + DCB_ATTR_PFC_STATE, + DCB_ATTR_PFC_CFG, + DCB_ATTR_NUM_TC, + DCB_ATTR_PG_CFG, + DCB_ATTR_SET_ALL, + DCB_ATTR_PERM_HWADDR, + DCB_ATTR_CAP, + DCB_ATTR_NUMTCS, + DCB_ATTR_BCN, + + __DCB_ATTR_ENUM_MAX, + DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs + * + * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8) + * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8) + * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8) + * @DCB_PFC_UP_ATTR_3: Priority 
Flow Control value for User Priority 3 (NLA_U8) + * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8) + * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8) + * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8) + * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8) + * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined + * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG) + * + */ +enum dcbnl_pfc_up_attrs { + DCB_PFC_UP_ATTR_UNDEFINED, + + DCB_PFC_UP_ATTR_0, + DCB_PFC_UP_ATTR_1, + DCB_PFC_UP_ATTR_2, + DCB_PFC_UP_ATTR_3, + DCB_PFC_UP_ATTR_4, + DCB_PFC_UP_ATTR_5, + DCB_PFC_UP_ATTR_6, + DCB_PFC_UP_ATTR_7, + DCB_PFC_UP_ATTR_ALL, + + __DCB_PFC_UP_ATTR_ENUM_MAX, + DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_pg_attrs - DCB Priority Group attributes + * + * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED) + * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined + * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED) + * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8) + * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined + * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG) + * + */ +enum dcbnl_pg_attrs { + DCB_PG_ATTR_UNDEFINED, + + DCB_PG_ATTR_TC_0, + DCB_PG_ATTR_TC_1, + DCB_PG_ATTR_TC_2, + DCB_PG_ATTR_TC_3, + DCB_PG_ATTR_TC_4, + DCB_PG_ATTR_TC_5, + DCB_PG_ATTR_TC_6, + DCB_PG_ATTR_TC_7, + DCB_PG_ATTR_TC_MAX, + DCB_PG_ATTR_TC_ALL, + + DCB_PG_ATTR_BW_ID_0, + DCB_PG_ATTR_BW_ID_1, + DCB_PG_ATTR_BW_ID_2, + DCB_PG_ATTR_BW_ID_3, + DCB_PG_ATTR_BW_ID_4, + DCB_PG_ATTR_BW_ID_5, + DCB_PG_ATTR_BW_ID_6, + DCB_PG_ATTR_BW_ID_7, + DCB_PG_ATTR_BW_ID_MAX, + DCB_PG_ATTR_BW_ID_ALL, + + __DCB_PG_ATTR_ENUM_MAX, + DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_tc_attrs - DCB Traffic Class attributes + * + * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors + * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to + * Valid values are: 0-7 + * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map + * Some devices may not support changing the + * user priority map of 
a TC. + * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting + * 0 - none + * 1 - group strict + * 2 - link strict + * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and + * not configured to use link strict priority, + * this is the percentage of bandwidth of the + * priority group this traffic class belongs to + * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters + * + */ +enum dcbnl_tc_attrs { + DCB_TC_ATTR_PARAM_UNDEFINED, + + DCB_TC_ATTR_PARAM_PGID, + DCB_TC_ATTR_PARAM_UP_MAPPING, + DCB_TC_ATTR_PARAM_STRICT_PRIO, + DCB_TC_ATTR_PARAM_BW_PCT, + DCB_TC_ATTR_PARAM_ALL, + + __DCB_TC_ATTR_PARAM_ENUM_MAX, + DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_cap_attrs - DCB Capability attributes + * + * @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters + * @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups + * @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control + * @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to + * traffic class mapping + * @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a + * number of traffic classes the device + * can be configured to use for Priority Groups + * @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a + * number of traffic classes the device can be + * configured to use for Priority Flow Control + * @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority + * @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion + * Notification + */ +enum dcbnl_cap_attrs { + DCB_CAP_ATTR_UNDEFINED, + DCB_CAP_ATTR_ALL, + DCB_CAP_ATTR_PG, + DCB_CAP_ATTR_PFC, + DCB_CAP_ATTR_UP2TC, + DCB_CAP_ATTR_PG_TCS, + DCB_CAP_ATTR_PFC_TCS, + DCB_CAP_ATTR_GSP, + DCB_CAP_ATTR_BCN, + + __DCB_CAP_ATTR_ENUM_MAX, + DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcbnl_numtcs_attrs - number of traffic classes + * + * @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors + * @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes + * @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for + * priority groups + * @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can + * support priority flow control + */ +enum dcbnl_numtcs_attrs { + DCB_NUMTCS_ATTR_UNDEFINED, + DCB_NUMTCS_ATTR_ALL, + DCB_NUMTCS_ATTR_PG, + DCB_NUMTCS_ATTR_PFC, + + __DCB_NUMTCS_ATTR_ENUM_MAX, + DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1, +}; + +enum dcbnl_bcn_attrs{ + DCB_BCN_ATTR_UNDEFINED = 0, + + DCB_BCN_ATTR_RP_0, + DCB_BCN_ATTR_RP_1, + DCB_BCN_ATTR_RP_2, + DCB_BCN_ATTR_RP_3, + DCB_BCN_ATTR_RP_4, + DCB_BCN_ATTR_RP_5, + DCB_BCN_ATTR_RP_6, + DCB_BCN_ATTR_RP_7, + DCB_BCN_ATTR_RP_ALL, + + DCB_BCN_ATTR_BCNA_0, + DCB_BCN_ATTR_BCNA_1, + DCB_BCN_ATTR_ALPHA, + DCB_BCN_ATTR_BETA, + DCB_BCN_ATTR_GD, + DCB_BCN_ATTR_GI, + DCB_BCN_ATTR_TMAX, + DCB_BCN_ATTR_TD, + DCB_BCN_ATTR_RMIN, + DCB_BCN_ATTR_W, + DCB_BCN_ATTR_RD, + DCB_BCN_ATTR_RU, + DCB_BCN_ATTR_WRTT, + DCB_BCN_ATTR_RI, + DCB_BCN_ATTR_C, + DCB_BCN_ATTR_ALL, + + __DCB_BCN_ATTR_ENUM_MAX, + DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1, +}; + +/** + * enum dcb_general_attr_values - general DCB attribute values + * + * @DCB_ATTR_UNDEFINED: value used to indicate an attribute is not supported + * + */ +enum dcb_general_attr_values { + DCB_ATTR_VALUE_UNDEFINED = 0xff +}; + + +#endif /* __LINUX_DCBNL_H__ */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 
6080449fbec..61734e27abb 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -168,6 +168,8 @@ enum { DCCPO_MIN_CCID_SPECIFIC = 128, DCCPO_MAX_CCID_SPECIFIC = 255, }; +/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ +#define DCCP_SINGLE_OPT_MAXLEN 253 /* DCCP CCIDS */ enum { @@ -176,29 +178,23 @@ enum { }; /* DCCP features (RFC 4340 section 6.4) */ -enum { +enum dccp_feature_numbers { DCCPF_RESERVED = 0, DCCPF_CCID = 1, - DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */ + DCCPF_SHORT_SEQNOS = 2, DCCPF_SEQUENCE_WINDOW = 3, - DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */ + DCCPF_ECN_INCAPABLE = 4, DCCPF_ACK_RATIO = 5, DCCPF_SEND_ACK_VECTOR = 6, DCCPF_SEND_NDP_COUNT = 7, DCCPF_MIN_CSUM_COVER = 8, - DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */ + DCCPF_DATA_CHECKSUM = 9, /* 10-127 reserved */ DCCPF_MIN_CCID_SPECIFIC = 128, + DCCPF_SEND_LEV_RATE = 192, /* RFC 4342, sec. 8.4 */ DCCPF_MAX_CCID_SPECIFIC = 255, }; -/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */ -struct dccp_so_feat { - __u8 dccpsf_feat; - __u8 __user *dccpsf_val; - __u8 dccpsf_len; -}; - /* DCCP socket options */ #define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */ #define DCCP_SOCKOPT_SERVICE 2 @@ -208,6 +204,10 @@ struct dccp_so_feat { #define DCCP_SOCKOPT_SERVER_TIMEWAIT 6 #define DCCP_SOCKOPT_SEND_CSCOV 10 #define DCCP_SOCKOPT_RECV_CSCOV 11 +#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12 +#define DCCP_SOCKOPT_CCID 13 +#define DCCP_SOCKOPT_TX_CCID 14 +#define DCCP_SOCKOPT_RX_CCID 15 #define DCCP_SOCKOPT_CCID_RX_INFO 128 #define DCCP_SOCKOPT_CCID_TX_INFO 192 @@ -360,7 +360,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) #define DCCPF_INITIAL_SEQUENCE_WINDOW 100 #define DCCPF_INITIAL_ACK_RATIO 2 #define DCCPF_INITIAL_CCID DCCPC_CCID2 -#define DCCPF_INITIAL_SEND_ACK_VECTOR 1 /* FIXME: for now we're default to 1 but it should really be 0 */ #define DCCPF_INITIAL_SEND_NDP_COUNT 1 @@ -370,20 +369,11 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) * Will be used to pass the state from dccp_request_sock to dccp_sock. 
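 * (With this change only the Sequence Window value and the two negotiation
 *  lists below remain here; the CCID, Ack Ratio, Ack Vector and NDP Count
 *  settings are carried as feature-negotiation entries instead, and the new
 *  DCCP_SOCKOPT_CCID/_TX_CCID/_RX_CCID options above are the user-visible
 *  way to express CCID preferences that feed that negotiation.)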
* * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2) - * @dccpms_ccid - Congestion Control Id (CCID) (section 10) - * @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5) - * @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2) - * @dccpms_ack_ratio - Ack Ratio Feature (section 11.3) * @dccpms_pending - List of features being negotiated * @dccpms_conf - */ struct dccp_minisock { __u64 dccpms_sequence_window; - __u8 dccpms_rx_ccid; - __u8 dccpms_tx_ccid; - __u8 dccpms_send_ack_vector; - __u8 dccpms_send_ndp_count; - __u8 dccpms_ack_ratio; struct list_head dccpms_pending; struct list_head dccpms_conf; }; @@ -411,6 +401,7 @@ extern void dccp_minisock_init(struct dccp_minisock *dmsk); * @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1) * @dreq_isr: initial sequence number received on the Request * @dreq_service: service code present on the Request (there is just one) + * @dreq_featneg: feature negotiation options for this connection * The following two fields are analogous to the ones in dccp_sock: * @dreq_timestamp_echo: last received timestamp to echo (13.1) * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo @@ -420,6 +411,7 @@ struct dccp_request_sock { __u64 dreq_iss; __u64 dreq_isr; __be32 dreq_service; + struct list_head dreq_featneg; __u32 dreq_timestamp_echo; __u32 dreq_timestamp_time; }; @@ -493,10 +485,12 @@ struct dccp_ackvec; * @dccps_r_ack_ratio - feature-remote Ack Ratio * @dccps_pcslen - sender partial checksum coverage (via sockopt) * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) + * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) * @dccps_ndp_count - number of Non Data Packets since last data packet * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) * @dccps_minisock - associated minisock (accessed via dccp_msk) + * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) * @dccps_hc_rx_ackvec - rx half connection ack vector * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) @@ -529,11 +523,13 @@ struct dccp_sock { __u32 dccps_timestamp_time; __u16 dccps_l_ack_ratio; __u16 dccps_r_ack_ratio; - __u16 dccps_pcslen; - __u16 dccps_pcrlen; + __u8 dccps_pcslen:4; + __u8 dccps_pcrlen:4; + __u8 dccps_send_ndp_count:1; __u64 dccps_ndp_count:48; unsigned long dccps_rate_last; struct dccp_minisock dccps_minisock; + struct list_head dccps_featneg; struct dccp_ackvec *dccps_hc_rx_ackvec; struct ccid *dccps_hc_rx_ccid; struct ccid *dccps_hc_tx_ccid; diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 4aaa4afb1cb..096476f1fb3 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -17,7 +17,7 @@ extern int debug_locks_off(void); ({ \ int __ret = 0; \ \ - if (unlikely(c)) { \ + if (!oops_in_progress && unlikely(c)) { \ if (debug_locks_off() && !debug_locks_silent) \ WARN_ON(1); \ __ret = 1; \ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index e1a6c046cea..23936b16426 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -63,6 +63,8 @@ struct dentry *debugfs_create_x16(const char *name, mode_t mode, struct dentry *parent, u16 *value); struct dentry *debugfs_create_x32(const char *name, mode_t mode, struct dentry *parent, u32 *value); +struct dentry *debugfs_create_size_t(const char 
*name, mode_t mode, + struct dentry *parent, size_t *value); struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *parent, u32 *value); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index c17fd334e57..8209e08969f 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -45,6 +45,8 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti); */ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, union map_info *map_context); +typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, + union map_info *map_context); /* * Returns: @@ -57,6 +59,9 @@ typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, typedef int (*dm_endio_fn) (struct dm_target *ti, struct bio *bio, int error, union map_info *map_context); +typedef int (*dm_request_endio_fn) (struct dm_target *ti, + struct request *clone, int error, + union map_info *map_context); typedef void (*dm_flush_fn) (struct dm_target *ti); typedef void (*dm_presuspend_fn) (struct dm_target *ti); @@ -75,6 +80,13 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size); +/* + * Returns: + * 0: The target can handle the next I/O immediately. + * 1: The target can't handle the next I/O immediately. + */ +typedef int (*dm_busy_fn) (struct dm_target *ti); + void dm_error(const char *message); /* @@ -100,14 +112,23 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d); /* * Information about a target type */ + +/* + * Target features + */ +#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001 + struct target_type { + uint64_t features; const char *name; struct module *module; unsigned version[3]; dm_ctr_fn ctr; dm_dtr_fn dtr; dm_map_fn map; + dm_map_request_fn map_rq; dm_endio_fn end_io; + dm_request_endio_fn rq_end_io; dm_flush_fn flush; dm_presuspend_fn presuspend; dm_postsuspend_fn postsuspend; @@ -117,6 +138,7 @@ struct target_type { dm_message_fn message; dm_ioctl_fn ioctl; dm_merge_fn merge; + dm_busy_fn busy; }; struct io_restrictions { @@ -157,8 +179,7 @@ struct dm_target { }; int dm_register_target(struct target_type *t); -int dm_unregister_target(struct target_type *t); - +void dm_unregister_target(struct target_type *t); /*----------------------------------------------------------------- * Functions for creating and manipulating mapped devices. @@ -276,6 +297,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); *---------------------------------------------------------------*/ #define DM_NAME "device-mapper" +#define DMCRIT(f, arg...) \ + printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) + #define DMERR(f, arg...) \ printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) #define DMERR_LIMIT(f, arg...) 
\ diff --git a/include/linux/device.h b/include/linux/device.h index 1a3686d15f9..7d9da4b4993 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -28,6 +28,7 @@ #define BUS_ID_SIZE 20 struct device; +struct device_private; struct device_driver; struct driver_private; struct class; @@ -65,7 +66,7 @@ struct bus_type { int (*resume_early)(struct device *dev); int (*resume)(struct device *dev); - struct pm_ext_ops *pm; + struct dev_pm_ops *pm; struct bus_type_private *p; }; @@ -133,7 +134,7 @@ struct device_driver { int (*resume) (struct device *dev); struct attribute_group **groups; - struct pm_ops *pm; + struct dev_pm_ops *pm; struct driver_private *p; }; @@ -198,7 +199,7 @@ struct class { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); - struct pm_ops *pm; + struct dev_pm_ops *pm; struct class_private *p; }; @@ -291,7 +292,7 @@ struct device_type { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); - struct pm_ops *pm; + struct dev_pm_ops *pm; }; /* interface for exporting device attributes */ @@ -365,17 +366,15 @@ struct device_dma_parameters { }; struct device { - struct klist klist_children; - struct klist_node knode_parent; /* node in sibling list */ - struct klist_node knode_driver; - struct klist_node knode_bus; struct device *parent; + struct device_private *p; + struct kobject kobj; char bus_id[BUS_ID_SIZE]; /* position on parent bus */ + unsigned uevent_suppress:1; const char *init_name; /* initial name of the device */ struct device_type *type; - unsigned uevent_suppress:1; struct semaphore sem; /* semaphore to synchronize calls to * its driver. @@ -408,12 +407,13 @@ struct device { /* arch specific additions */ struct dev_archdata archdata; + dev_t devt; /* dev_t, creates the sysfs "dev" */ + spinlock_t devres_lock; struct list_head devres_head; struct klist_node knode_class; struct class *class; - dev_t devt; /* dev_t, creates the sysfs "dev" */ struct attribute_group **groups; /* optional groups */ void (*release)(struct device *dev); @@ -483,6 +483,17 @@ extern int device_rename(struct device *dev, char *new_name); extern int device_move(struct device *dev, struct device *new_parent); /* + * Root device objects for grouping under /sys/devices + */ +extern struct device *__root_device_register(const char *name, + struct module *owner); +static inline struct device *root_device_register(const char *name) +{ + return __root_device_register(name, THIS_MODULE); +} +extern void root_device_unregister(struct device *root); + +/* * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ @@ -553,13 +564,13 @@ extern const char *dev_driver_string(const struct device *dev); #define dev_info(dev, format, arg...) \ dev_printk(KERN_INFO , dev , format , ## arg) -#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG) +#if defined(DEBUG) +#define dev_dbg(dev, format, arg...) \ + dev_printk(KERN_DEBUG , dev , format , ## arg) +#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG) #define dev_dbg(dev, format, ...) do { \ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ } while (0) -#elif defined(DEBUG) -#define dev_dbg(dev, format, arg...) \ - dev_printk(KERN_DEBUG , dev , format , ## arg) #else #define dev_dbg(dev, format, arg...) 
\ ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index 952df39c989..af1dab41674 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -9,148 +9,24 @@ #define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) -#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) -#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) - - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 val; - u64 rsvd1; -}; -#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) -static inline bool root_present(struct root_entry *root) -{ - return (root->val & 1); -} -static inline void set_root_present(struct root_entry *root) -{ - root->val |= 1; -} -static inline void set_root_value(struct root_entry *root, unsigned long value) -{ - root->val |= value & VTD_PAGE_MASK; -} - -struct context_entry; -static inline struct context_entry * -get_context_addr_from_root(struct root_entry *root) -{ - return (struct context_entry *) - (root_present(root)?phys_to_virt( - root->val & VTD_PAGE_MASK) : - NULL); -} - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; -#define context_present(c) ((c).lo & 1) -#define context_fault_disable(c) (((c).lo >> 1) & 1) -#define context_translation_type(c) (((c).lo >> 2) & 3) -#define context_address_root(c) ((c).lo & VTD_PAGE_MASK) -#define context_address_width(c) ((c).hi & 7) -#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) - -#define context_set_present(c) do {(c).lo |= 1;} while (0) -#define context_set_fault_enable(c) \ - do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) -#define context_set_translation_type(c, val) \ - do { \ - (c).lo &= (((u64)-1) << 4) | 3; \ - (c).lo |= ((val) & 3) << 2; \ - } while (0) -#define CONTEXT_TT_MULTI_LEVEL 0 -#define context_set_address_root(c, val) \ - do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0) -#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) -#define context_set_domain_id(c, val) \ - do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) -#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-11: available - * 12-63: Host physcial address - */ -struct dma_pte { - u64 val; -}; -#define dma_clear_pte(p) do {(p).val = 0;} while (0) - #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) -#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) -#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) -#define dma_set_pte_prot(p, prot) \ - do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) -#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK) -#define dma_set_pte_addr(p, addr) do {\ - (p).val |= ((addr) & VTD_PAGE_MASK); } while (0) -#define dma_pte_present(p) (((p).val & 3) != 0) - struct intel_iommu; +struct dmar_domain; +struct root_entry; -struct dmar_domain { - int id; /* domain id */ - struct intel_iommu *iommu; /* back pointer to owning iommu */ - - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct 
dma_pte *pgd; /* virtual address */ - spinlock_t mapping_lock; /* page table lock */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - -#define DOMAIN_FLAG_MULTIPLE_DEVICES 1 - int flags; -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u8 bus; /* PCI bus numer */ - u8 devfn; /* PCI devfn number */ - struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ - struct dmar_domain *domain; /* pointer to domain */ -}; - -extern int init_dmars(void); extern void free_dmar_iommu(struct intel_iommu *iommu); -extern int dmar_disabled; - -#ifndef CONFIG_DMAR_GFX_WA -static inline void iommu_prepare_gfx_mapping(void) +#ifdef CONFIG_DMAR +extern int iommu_calculate_agaw(struct intel_iommu *iommu); +#else +static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { - return; + return 0; } -#endif /* !CONFIG_DMAR_GFX_WA */ +#endif + +extern int dmar_disabled; #endif diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f1984fc3e06..f28440784cf 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -144,7 +144,6 @@ struct dmar_rmrr_unit { list_for_each_entry(rmrr, &dmar_rmrr_units, list) /* Intel DMAR initialization functions */ extern int intel_iommu_init(void); -extern int dmar_disabled; #else static inline int intel_iommu_init(void) { diff --git a/include/linux/dmi.h b/include/linux/dmi.h index e5084eb5943..34161907b2f 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -44,8 +44,10 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name, extern void dmi_scan_machine(void); extern int dmi_get_year(int field); extern int dmi_name_in_vendors(const char *str); +extern int dmi_name_in_serial(const char *str); extern int dmi_available; extern int dmi_walk(void (*decode)(const struct dmi_header *)); +extern bool dmi_match(enum dmi_field f, const char *str); #else @@ -56,9 +58,12 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na static inline void dmi_scan_machine(void) { return; } static inline int dmi_get_year(int year) { return 0; } static inline int dmi_name_in_vendors(const char *s) { return 0; } +static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 static inline int dmi_walk(void (*decode)(const struct dmi_header *)) { return -1; } +static inline bool dmi_match(enum dmi_field f, const char *str) + { return false; } #endif diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h new file mode 100644 index 00000000000..82a16527b36 --- /dev/null +++ b/include/linux/dqblk_qtree.h @@ -0,0 +1,56 @@ +/* + * Definitions of structures and functions for quota formats using trie + */ + +#ifndef _LINUX_DQBLK_QTREE_H +#define _LINUX_DQBLK_QTREE_H + +#include <linux/types.h> + +/* Numbers of blocks needed for updates - we count with the smallest + * possible block size (1024) */ +#define QTREE_INIT_ALLOC 4 +#define QTREE_INIT_REWRITE 2 +#define QTREE_DEL_ALLOC 0 +#define QTREE_DEL_REWRITE 6 + +struct dquot; + +/* Operations */ +struct qtree_fmt_operations { + void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */ + void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */ + int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? 
*/ +}; + +/* Inmemory copy of version specific information */ +struct qtree_mem_dqinfo { + struct super_block *dqi_sb; /* Sb quota is on */ + int dqi_type; /* Quota type */ + unsigned int dqi_blocks; /* # of blocks in quota file */ + unsigned int dqi_free_blk; /* First block in list of free blocks */ + unsigned int dqi_free_entry; /* First block with free entry */ + unsigned int dqi_blocksize_bits; /* Block size of quota file */ + unsigned int dqi_entry_size; /* Size of quota entry in quota file */ + unsigned int dqi_usable_bs; /* Space usable in block for quota data */ + unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ + struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ +}; + +int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); +int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); +static inline int qtree_depth(struct qtree_mem_dqinfo *info) +{ + unsigned int epb = info->dqi_usable_bs >> 2; + unsigned long long entries = epb; + int i; + + for (i = 1; entries < (1ULL << 32); i++) + entries *= epb; + return i; +} + +#endif /* _LINUX_DQBLK_QTREE_H */ diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h index 57f1250d5a5..3713a7232dd 100644 --- a/include/linux/dqblk_v1.h +++ b/include/linux/dqblk_v1.h @@ -5,9 +5,6 @@ #ifndef _LINUX_DQBLK_V1_H #define _LINUX_DQBLK_V1_H -/* Id of quota format */ -#define QFMT_VFS_OLD 1 - /* Root squash turned on */ #define V1_DQF_RSQUASH 1 @@ -17,8 +14,4 @@ #define V1_DEL_ALLOC 0 #define V1_DEL_REWRITE 2 -/* Special information about quotafile */ -struct v1_mem_dqinfo { -}; - #endif /* _LINUX_DQBLK_V1_H */ diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h index 4f853322cb7..18000a54267 100644 --- a/include/linux/dqblk_v2.h +++ b/include/linux/dqblk_v2.h @@ -1,26 +1,16 @@ /* - * Definitions of structures for vfsv0 quota format + * Definitions for vfsv0 quota format */ #ifndef _LINUX_DQBLK_V2_H #define _LINUX_DQBLK_V2_H -#include <linux/types.h> - -/* id numbers of quota format */ -#define QFMT_VFS_V0 2 +#include <linux/dqblk_qtree.h> /* Numbers of blocks needed for updates */ -#define V2_INIT_ALLOC 4 -#define V2_INIT_REWRITE 2 -#define V2_DEL_ALLOC 0 -#define V2_DEL_REWRITE 6 - -/* Inmemory copy of version specific information */ -struct v2_mem_dqinfo { - unsigned int dqi_blocks; - unsigned int dqi_free_blk; - unsigned int dqi_free_entry; -}; +#define V2_INIT_ALLOC QTREE_INIT_ALLOC +#define V2_INIT_REWRITE QTREE_INIT_REWRITE +#define V2_DEL_ALLOC QTREE_DEL_ALLOC +#define V2_DEL_REWRITE QTREE_DEL_REWRITE #endif /* _LINUX_DQBLK_V2_H */ diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index 79a8ed8e6a7..55026b1a40b 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -62,10 +62,11 @@ typedef enum fe_caps { FE_CAN_HIERARCHY_AUTO = 0x100000, FE_CAN_8VSB = 0x200000, FE_CAN_16VSB = 0x400000, - FE_HAS_EXTENDED_CAPS = 0x800000, // We need more bitspace for newer APIs, indicate this. 
- FE_NEEDS_BENDING = 0x20000000, // not supported anymore, don't use (frontend requires frequency bending) - FE_CAN_RECOVER = 0x40000000, // frontend can recover from a cable unplug automatically - FE_CAN_MUTE_TS = 0x80000000 // frontend can stop spurious TS data output + FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */ + FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */ + FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */ + FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */ + FE_CAN_MUTE_TS = 0x80000000 /* frontend can stop spurious TS data output */ } fe_caps_t; @@ -121,15 +122,15 @@ typedef enum fe_sec_mini_cmd { typedef enum fe_status { - FE_HAS_SIGNAL = 0x01, /* found something above the noise level */ - FE_HAS_CARRIER = 0x02, /* found a DVB signal */ - FE_HAS_VITERBI = 0x04, /* FEC is stable */ - FE_HAS_SYNC = 0x08, /* found sync bytes */ - FE_HAS_LOCK = 0x10, /* everything's working... */ - FE_TIMEDOUT = 0x20, /* no lock within the last ~2 seconds */ - FE_REINIT = 0x40 /* frontend was reinitialized, */ -} fe_status_t; /* application is recommended to reset */ - /* DiSEqC, tone and parameters */ + FE_HAS_SIGNAL = 0x01, /* found something above the noise level */ + FE_HAS_CARRIER = 0x02, /* found a DVB signal */ + FE_HAS_VITERBI = 0x04, /* FEC is stable */ + FE_HAS_SYNC = 0x08, /* found sync bytes */ + FE_HAS_LOCK = 0x10, /* everything's working... */ + FE_TIMEDOUT = 0x20, /* no lock within the last ~2 seconds */ + FE_REINIT = 0x40 /* frontend was reinitialized, */ +} fe_status_t; /* application is recommended to reset */ + /* DiSEqC, tone and parameters */ typedef enum fe_spectral_inversion { INVERSION_OFF, diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 92f6f634e3e..7a204256b15 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); typedef void *(elevator_init_fn) (struct request_queue *); -typedef void (elevator_exit_fn) (elevator_t *); +typedef void (elevator_exit_fn) (struct elevator_queue *); struct elevator_ops { @@ -62,8 +62,8 @@ struct elevator_ops struct elv_fs_entry { struct attribute attr; - ssize_t (*show)(elevator_t *, char *); - ssize_t (*store)(elevator_t *, const char *, size_t); + ssize_t (*show)(struct elevator_queue *, char *); + ssize_t (*store)(struct elevator_queue *, const char *, size_t); }; /* @@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(elevator_t *); +extern void elevator_exit(struct elevator_queue *); extern int elv_rq_merge_ok(struct request *, struct bio *); /* diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 25d62e6e329..1cb0f0b9092 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -27,6 +27,7 @@ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/random.h> +#include <asm/unaligned.h> #ifdef __KERNEL__ extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); @@ -41,6 +42,10 @@ extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh); 
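The elv_fs_entry and elevator_exit() changes above spell out the struct elevator_queue type for every user. As a minimal sketch of a scheduler-private sysfs tunable written against the new signatures (the foo_* names and the threshold field are illustrative; the table would be wired up through the scheduler's struct elevator_type as usual):

#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include <linux/elevator.h>

struct foo_data {
	int threshold;			/* illustrative private tunable */
};

static ssize_t foo_threshold_show(struct elevator_queue *e, char *page)
{
	struct foo_data *fd = e->elevator_data;

	return sprintf(page, "%d\n", fd->threshold);
}

static ssize_t foo_threshold_store(struct elevator_queue *e,
				   const char *page, size_t count)
{
	struct foo_data *fd = e->elevator_data;

	fd->threshold = simple_strtol(page, NULL, 10);
	return count;
}

static struct elv_fs_entry foo_attrs[] = {
	__ATTR(threshold, S_IRUGO | S_IWUSR,
	       foo_threshold_show, foo_threshold_store),
	__ATTR_NULL
};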
extern void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); +extern int eth_mac_addr(struct net_device *dev, void *p); +extern int eth_change_mtu(struct net_device *dev, int new_mtu); +extern int eth_validate_addr(struct net_device *dev); + extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); @@ -136,6 +141,47 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) BUILD_BUG_ON(ETH_ALEN != 6); return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; } + +static inline unsigned long zap_last_2bytes(unsigned long value) +{ +#ifdef __BIG_ENDIAN + return value >> 16; +#else + return value << 16; +#endif +} + +/** + * compare_ether_addr_64bits - Compare two Ethernet addresses + * @addr1: Pointer to an array of 8 bytes + * @addr2: Pointer to another array of 8 bytes + * + * Compare two Ethernet addresses, returns 0 if equal. + * Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional + * branches, and possibly long word memory accesses on CPUs allowing cheap + * unaligned memory reads. + * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2} + * + * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits. + */ + +static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], + const u8 addr2[6+2]) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + unsigned long fold = ((*(unsigned long *)addr1) ^ + (*(unsigned long *)addr2)); + + if (sizeof(fold) == 8) + return zap_last_2bytes(fold) != 0; + + fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^ + (*(unsigned long *)(addr2 + 4))); + return fold != 0; +#else + return compare_ether_addr(addr1, addr2); +#endif +} #endif /* __KERNEL__ */ #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index b4b038b89ee..27c67a54223 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -467,6 +467,8 @@ struct ethtool_ops { #define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ #define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ +#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ +#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h index 78c775a83f7..121720d74e1 100644 --- a/include/linux/ext2_fs.h +++ b/include/linux/ext2_fs.h @@ -194,6 +194,30 @@ struct ext2_group_desc #define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */ #define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */ +/* Flags that should be inherited by new inodes from their parent. */ +#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\ + EXT2_SYNC_FL | EXT2_IMMUTABLE_FL | EXT2_APPEND_FL |\ + EXT2_NODUMP_FL | EXT2_NOATIME_FL | EXT2_COMPRBLK_FL|\ + EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\ + EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL) + +/* Flags that are appropriate for regular files (all but dir-specific ones). */ +#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL)) + +/* Flags that are appropriate for non-directories/regular files. */ +#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL) + +/* Mask out flags that are inappropriate for the given type of inode.
*/ +static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags) +{ + if (S_ISDIR(mode)) + return flags; + else if (S_ISREG(mode)) + return flags & EXT2_REG_FLMASK; + else + return flags & EXT2_OTHER_FLMASK; +} + /* * ioctl commands */ diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h index f273415ab6f..1cdb66367c9 100644 --- a/include/linux/ext2_fs_sb.h +++ b/include/linux/ext2_fs_sb.h @@ -101,11 +101,17 @@ struct ext2_sb_info { struct percpu_counter s_freeblocks_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; - struct blockgroup_lock s_blockgroup_lock; + struct blockgroup_lock *s_blockgroup_lock; /* root of the per fs reservation window tree */ spinlock_t s_rsv_window_lock; struct rb_root s_rsv_window_root; struct ext2_reserve_window_node s_rsv_window_head; }; +static inline spinlock_t * +sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT2_FS_SB */ diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index d14f0291848..dd495b8c309 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -178,6 +178,30 @@ struct ext3_group_desc #define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ #define EXT3_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ +/* Flags that should be inherited by new inodes from their parent. */ +#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\ + EXT3_SYNC_FL | EXT3_IMMUTABLE_FL | EXT3_APPEND_FL |\ + EXT3_NODUMP_FL | EXT3_NOATIME_FL | EXT3_COMPRBLK_FL|\ + EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\ + EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL) + +/* Flags that are appropriate for regular files (all but dir-specific ones). */ +#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL)) + +/* Flags that are appropriate for non-directories/regular files. */ +#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL) + +/* Mask out flags that are inappropriate for the given type of inode. */ +static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags) +{ + if (S_ISDIR(mode)) + return flags; + else if (S_ISREG(mode)) + return flags & EXT3_REG_FLMASK; + else + return flags & EXT3_OTHER_FLMASK; +} + /* * Inode dynamic state flags */ @@ -354,6 +378,13 @@ struct ext3_inode { #define EXT3_ORPHAN_FS 0x0004 /* Orphans being recovered */ /* + * Misc. 
filesystem flags + */ +#define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */ +#define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */ +#define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */ + +/* * Mount flags */ #define EXT3_MOUNT_CHECK 0x00001 /* Do mount-time checks */ @@ -489,7 +520,23 @@ struct ext3_super_block { __u16 s_reserved_word_pad; __le32 s_default_mount_opts; __le32 s_first_meta_bg; /* First metablock block group */ - __u32 s_reserved[190]; /* Padding to the end of the block */ + __le32 s_mkfs_time; /* When the filesystem was created */ + __le32 s_jnl_blocks[17]; /* Backup of the journal inode */ + /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */ +/*150*/ __le32 s_blocks_count_hi; /* Blocks count */ + __le32 s_r_blocks_count_hi; /* Reserved blocks count */ + __le32 s_free_blocks_count_hi; /* Free blocks count */ + __le16 s_min_extra_isize; /* All inodes have at least # bytes */ + __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ + __le32 s_flags; /* Miscellaneous flags */ + __le16 s_raid_stride; /* RAID stride */ + __le16 s_mmp_interval; /* # seconds to wait in MMP checking */ + __le64 s_mmp_block; /* Block for multi-mount protection */ + __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ + __u8 s_log_groups_per_flex; /* FLEX_BG group size */ + __u8 s_reserved_char_pad2; + __le16 s_reserved_pad; + __u32 s_reserved[162]; /* Padding to the end of the block */ }; #ifdef __KERNEL__ @@ -694,6 +741,9 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len) #define DX_HASH_LEGACY 0 #define DX_HASH_HALF_MD4 1 #define DX_HASH_TEA 2 +#define DX_HASH_LEGACY_UNSIGNED 3 +#define DX_HASH_HALF_MD4_UNSIGNED 4 +#define DX_HASH_TEA_UNSIGNED 5 #ifdef __KERNEL__ diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index b65f0288b84..f07f34de2f0 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h @@ -57,10 +57,11 @@ struct ext3_sb_info { u32 s_next_generation; u32 s_hash_seed[4]; int s_def_hash_version; + int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */ struct percpu_counter s_freeblocks_counter; struct percpu_counter s_freeinodes_counter; struct percpu_counter s_dirs_counter; - struct blockgroup_lock s_blockgroup_lock; + struct blockgroup_lock *s_blockgroup_lock; /* root of the per fs reservation window tree */ spinlock_t s_rsv_window_lock; @@ -83,4 +84,10 @@ struct ext3_sb_info { #endif }; +static inline spinlock_t * +sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT3_FS_SB */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 32368c4f032..06ca9b21dad 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr) #endif /* CONFIG_FAULT_INJECTION */ +#ifdef CONFIG_FAILSLAB +extern bool should_failslab(size_t size, gfp_t gfpflags); +#else +static inline bool should_failslab(size_t size, gfp_t gfpflags) +{ + return false; +} +#endif /* CONFIG_FAILSLAB */ + #endif /* _LINUX_FAULT_INJECT_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h index 75a81eaf343..1ee63df5be9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -888,7 +888,7 @@ struct fb_info { #define fb_writeq sbus_writeq #define fb_memset sbus_memset_io -#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || 
(defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__) +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) #define fb_readb __raw_readb #define fb_readw __raw_readw diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h index e61e42dfd31..155bafd9e88 100644 --- a/include/linux/fddidevice.h +++ b/include/linux/fddidevice.h @@ -27,6 +27,7 @@ #ifdef __KERNEL__ extern __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev); +extern int fddi_change_mtu(struct net_device *dev, int new_mtu); extern struct net_device *alloc_fddidev(int sizeof_priv); #endif diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 4aab6f12cfa..09d6c5bbddd 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -57,8 +57,6 @@ struct files_struct { #define files_fdtable(files) (rcu_dereference((files)->fdt)) -extern struct kmem_cache *filp_cachep; - struct file_operations; struct vfsmount; struct dentry; diff --git a/include/linux/filter.h b/include/linux/filter.h index b6ea9aa9e85..1354aaf6abb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -122,7 +122,8 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */ #define SKF_AD_PKTTYPE 4 #define SKF_AD_IFINDEX 8 #define SKF_AD_NLATTR 12 -#define SKF_AD_MAX 16 +#define SKF_AD_NLATTR_NEST 16 +#define SKF_AD_MAX 20 #define SKF_NET_OFF (-0x100000) #define SKF_LL_OFF (-0x200000) diff --git a/include/linux/fs.h b/include/linux/fs.h index 4a853ef6fd3..0b87b29f479 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -21,7 +21,6 @@ /* Fixed constants first: */ #undef NR_OPEN -extern int sysctl_nr_open; #define INR_OPEN 1024 /* Initial setting for nfile rlimits */ #define BLOCK_SIZE_BITS 10 @@ -38,21 +37,13 @@ struct files_stat_struct { int nr_free_files; /* read only */ int max_files; /* tunable */ }; -extern struct files_stat_struct files_stat; -extern int get_max_files(void); struct inodes_stat_t { int nr_inodes; int nr_unused; int dummy[5]; /* padding for sysctl ABI compatibility */ }; -extern struct inodes_stat_t inodes_stat; -extern int leases_enable, lease_break_time; - -#ifdef CONFIG_DNOTIFY -extern int dir_notify_enable; -#endif #define NR_FILE 8192 /* this can well be larger on a larger system */ @@ -82,6 +73,14 @@ extern int dir_notify_enable; (specialy hack for floppy.c) */ #define FMODE_WRITE_IOCTL ((__force fmode_t)128) +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. 
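+ *
+ * (As an example of the intended use: the XFS open-by-handle path can set
+ * this bit in file->f_mode, so that reads and writes done through such a
+ * handle do not touch the inode's ctime/mtime.)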
+ */ +#define FMODE_NOCMTIME ((__force fmode_t)2048) + #define RW_MASK 1 #define RWA_MASK 2 #define READ 0 @@ -316,11 +315,21 @@ struct poll_table_struct; struct kstatfs; struct vm_area_struct; struct vfsmount; +struct cred; extern void __init inode_init(void); extern void __init inode_init_early(void); extern void __init files_init(unsigned long); +extern struct files_stat_struct files_stat; +extern int get_max_files(void); +extern int sysctl_nr_open; +extern struct inodes_stat_t inodes_stat; +extern int leases_enable, lease_break_time; +#ifdef CONFIG_DNOTIFY +extern int dir_notify_enable; +#endif + struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); @@ -414,6 +423,9 @@ enum positive_aop_returns { #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ /* * oh the beauties of C type declarations. @@ -553,6 +565,7 @@ struct address_space { struct block_device { dev_t bd_dev; /* not a kdev_t - it's a search key */ struct inode * bd_inode; /* will die */ + struct super_block * bd_super; int bd_openers; struct mutex bd_mutex; /* open/close mutex */ struct semaphore bd_mount_sem; @@ -827,7 +840,7 @@ struct file { fmode_t f_mode; loff_t f_pos; struct fown_struct f_owner; - unsigned int f_uid, f_gid; + const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; @@ -1121,7 +1134,6 @@ struct super_block { struct rw_semaphore s_umount; struct mutex s_lock; int s_count; - int s_syncing; int s_need_sync_fs; atomic_t s_active; #ifdef CONFIG_SECURITY @@ -1173,6 +1185,11 @@ struct super_block { * generic_show_options() */ char *s_options; + + /* + * storage for asynchronous operations + */ + struct list_head s_async_list; }; extern struct timespec current_fs_time(struct super_block *sb); @@ -1194,7 +1211,7 @@ enum { #define has_fs_excl() atomic_read(¤t->fs_excl) #define is_owner_or_cap(inode) \ - ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) + ((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER)) /* not quite ready to be deprecated, but... */ extern void lock_super(struct super_block *); @@ -1203,7 +1220,6 @@ extern void unlock_super(struct super_block *); /* * VFS helper functions.. 
*/ -extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); @@ -1301,7 +1317,6 @@ struct file_operations { ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); - int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); @@ -1375,6 +1390,7 @@ struct super_operations { ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); #endif + int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); }; /* @@ -1674,7 +1690,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, extern long do_sys_open(int dfd, const char __user *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); -extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); +extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, + const struct cred *); extern int filp_close(struct file *, fl_owner_t id); extern char * getname(const char __user *); @@ -1819,7 +1836,7 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); -extern long do_fsync(struct file *file, int datasync); +extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); extern void sync_supers(void); extern void sync_filesystems(int wait); extern void __fsync_super(struct super_block *sb); @@ -1859,7 +1876,7 @@ extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, int open_flag, int mode); -extern int may_open(struct nameidata *, int, int); +extern int may_open(struct path *, int, int); extern int kernel_read(struct file *, unsigned long, char *, unsigned long); extern struct file * open_exec(const char *); @@ -1875,7 +1892,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin); extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); +extern struct inode * inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void inode_add_to_lists(struct super_block *, struct inode *); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); @@ -1892,6 +1911,8 @@ extern struct inode *ilookup(struct super_block *sb, unsigned long ino); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); extern void unlock_new_inode(struct inode *); extern void __iget(struct inode * inode); @@ -2023,7 +2044,7 @@ extern int 
page_readlink(struct dentry *, char __user *, int); extern void *page_follow_link_light(struct dentry *, struct nameidata *); extern void page_put_link(struct dentry *, struct nameidata *, void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, - gfp_t gfp_mask); + int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern int generic_readlink(struct dentry *, char __user *, int); @@ -2044,6 +2065,9 @@ extern int vfs_fstat(unsigned int, struct kstat *); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); +extern int __generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, + u64 len, get_block_t *get_block); extern int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block); diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 9e5a06e78d0..a97c053d3a9 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -10,12 +10,6 @@ struct fs_struct { struct path root, pwd; }; -#define INIT_FS { \ - .count = ATOMIC_INIT(1), \ - .lock = RW_LOCK_UNLOCKED, \ - .umask = 0022, \ -} - extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 708bab58d8d..d9051d717d2 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -47,12 +47,7 @@ struct gianfar_platform_data { /* device specific information */ u32 device_flags; - /* board specific information */ - u32 board_flags; - int mdio_bus; /* Bus controlled by us */ - char bus_id[MII_BUS_ID_SIZE]; /* Bus PHY is on */ - u32 phy_id; - u8 mac_addr[6]; + char bus_id[BUS_ID_SIZE]; phy_interface_t interface; }; @@ -61,17 +56,6 @@ struct gianfar_mdio_data { int irq[32]; }; -/* Flags related to gianfar device features */ -#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001 -#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002 -#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004 -#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008 -#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 -#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 -#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 -#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080 -#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 - /* Flags in gianfar_platform_data */ #define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ #define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9c5bc6be2b0..677432b9cb7 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -8,6 +8,8 @@ #include <linux/types.h> #include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/bitops.h> +#include <linux/sched.h> #ifdef CONFIG_FUNCTION_TRACER @@ -24,6 +26,45 @@ struct ftrace_ops { struct ftrace_ops *next; }; +extern int function_trace_stop; + +/* + * Type of the current tracing. + */ +enum ftrace_tracing_type_t { + FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */ + FTRACE_TYPE_RETURN, /* Hook the return of the function */ +}; + +/* Current tracing type, default is FTRACE_TYPE_ENTER */ +extern enum ftrace_tracing_type_t ftrace_tracing_type; + +/** + * ftrace_stop - stop function tracer. + * + * A quick way to stop the function tracer. 
Note this is an on/off switch, + * it is not something that is recursive like preempt_disable. + * This does not disable the calling of mcount, it only stops the + * calling of functions from mcount. + */ +static inline void ftrace_stop(void) +{ + function_trace_stop = 1; +} + +/** + * ftrace_start - start the function tracer. + * + * This function is the inverse of ftrace_stop. This does not enable + * function tracing if the function tracer is disabled. This only + * sets the function tracer flag to continue calling the functions + * from mcount. + */ +static inline void ftrace_start(void) +{ + function_trace_stop = 0; +} + /* * The ftrace_ops must be a static and should also * be read_mostly. These functions do modify read_mostly variables @@ -42,9 +83,21 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1); # define unregister_ftrace_function(ops) do { } while (0) # define clear_ftrace_function(ops) do { } while (0) static inline void ftrace_kill(void) { } +static inline void ftrace_stop(void) { } +static inline void ftrace_start(void) { } #endif /* CONFIG_FUNCTION_TRACER */ +#ifdef CONFIG_STACK_TRACER +extern int stack_tracer_enabled; +int +stack_trace_sysctl(struct ctl_table *table, int write, + struct file *file, void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif + #ifdef CONFIG_DYNAMIC_FTRACE +/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ +#include <asm/ftrace.h> enum { FTRACE_FL_FREE = (1 << 0), @@ -60,6 +113,7 @@ struct dyn_ftrace { struct list_head list; unsigned long ip; /* address of mcount call-site */ unsigned long flags; + struct dyn_arch_ftrace arch; }; int ftrace_force_update(void); @@ -67,19 +121,48 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset); /* defined in arch */ extern int ftrace_ip_converted(unsigned long ip); -extern unsigned char *ftrace_nop_replace(void); -extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); extern int ftrace_dyn_arch_init(void *data); extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); extern void ftrace_call(void); extern void mcount_call(void); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +extern void ftrace_graph_caller(void); +extern int ftrace_enable_ftrace_graph_caller(void); +extern int ftrace_disable_ftrace_graph_caller(void); +#else +static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; } +static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } +#endif + +/** + * ftrace_make_nop - convert a call site into a nop + * @mod: module structure if called by module load initialization + * @rec: the mcount call site record + * @addr: the address that the call site should be calling + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a caller to @addr + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure.
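+ *
+ * As an illustration (nothing in this interface mandates it), the x86
+ * implementation follows exactly this pattern: it rebuilds the call
+ * instruction it expects to find at @rec->ip from @addr, compares that
+ * against what is actually in memory, and only on a match writes the
+ * architecture's nop sequence over it, so an unexpected call site is
+ * reported as -EINVAL rather than silently overwritten.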
+ */ +extern int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr); /** - * ftrace_modify_code - modify code segment - * @ip: the address of the code segment - * @old_code: the contents of what is expected to be there - * @new_code: the code to patch in + * ftrace_make_call - convert a nop call site into a call to addr + * @rec: the mcount call site record + * @addr: the address that the call site should call * * This is a very sensitive operation and great care needs * to be taken by the arch. The operation should carefully @@ -87,6 +170,8 @@ extern void mcount_call(void); * what we expect it to be, and then on success of the compare, * it should write to the location. * + * The code segment at @rec->ip should be a nop + * * Return must be: * 0 on success * -EFAULT on error reading the location @@ -94,8 +179,11 @@ extern void mcount_call(void); * -EPERM on error writing to the location * Any other value will be considered a failure. */ -extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, - unsigned char *new_code); +extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); + + +/* May be defined in arch */ +extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); @@ -103,7 +191,6 @@ extern void ftrace_release(void *start, unsigned long size); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); - #else # define skip_trace(ip) ({ 0; }) # define ftrace_force_update() ({ 0; }) @@ -182,6 +269,12 @@ static inline void __ftrace_enabled_restore(int enabled) #endif #ifdef CONFIG_TRACING +extern int ftrace_dump_on_oops; + +extern void tracing_start(void); +extern void tracing_stop(void); +extern void ftrace_off_permanent(void); + extern void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); @@ -210,8 +303,11 @@ extern void ftrace_dump(void); static inline void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } static inline int -ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))); +ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); +static inline void tracing_start(void) { } +static inline void tracing_stop(void) { } +static inline void ftrace_off_permanent(void) { } static inline int ftrace_printk(const char *fmt, ...) 
{ @@ -222,33 +318,178 @@ static inline void ftrace_dump(void) { } #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); -extern void ftrace_init_module(unsigned long *start, unsigned long *end); +extern void ftrace_init_module(struct module *mod, + unsigned long *start, unsigned long *end); #else static inline void ftrace_init(void) { } static inline void -ftrace_init_module(unsigned long *start, unsigned long *end) { } +ftrace_init_module(struct module *mod, + unsigned long *start, unsigned long *end) { } +#endif + +enum { + POWER_NONE = 0, + POWER_CSTATE = 1, + POWER_PSTATE = 2, +}; + +struct power_trace { +#ifdef CONFIG_POWER_TRACER + ktime_t stamp; + ktime_t end; + int type; + int state; #endif +}; +#ifdef CONFIG_POWER_TRACER +extern void trace_power_start(struct power_trace *it, unsigned int type, + unsigned int state); +extern void trace_power_mark(struct power_trace *it, unsigned int type, + unsigned int state); +extern void trace_power_end(struct power_trace *it); +#else +static inline void trace_power_start(struct power_trace *it, unsigned int type, + unsigned int state) { } +static inline void trace_power_mark(struct power_trace *it, unsigned int type, + unsigned int state) { } +static inline void trace_power_end(struct power_trace *it) { } +#endif + + +/* + * Structure that defines an entry function trace. + */ +struct ftrace_graph_ent { + unsigned long func; /* Current function */ + int depth; +}; -struct boot_trace { - pid_t caller; - char func[KSYM_SYMBOL_LEN]; - int result; - unsigned long long duration; /* usecs */ - ktime_t calltime; - ktime_t rettime; +/* + * Structure that defines a return function trace. + */ +struct ftrace_graph_ret { + unsigned long func; /* Current function */ + unsigned long long calltime; + unsigned long long rettime; + /* Number of functions that overran the depth limit for current task */ + unsigned long overrun; + int depth; }; -#ifdef CONFIG_BOOT_TRACER -extern void trace_boot(struct boot_trace *it, initcall_t fn); -extern void start_boot_trace(void); -extern void stop_boot_trace(void); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* + * Sometimes we don't want to trace a function with the function + * graph tracer but we want them to keep traced by the usual function + * tracer if the function graph tracer is not configured. + */ +#define __notrace_funcgraph notrace + +/* + * We want to which function is an entrypoint of a hardirq. + * That will help us to put a signal on output. 
+ */ +#define __irq_entry __attribute__((__section__(".irqentry.text"))) + +/* Limits of hardirq entrypoints */ +extern char __irqentry_text_start[]; +extern char __irqentry_text_end[]; + +#define FTRACE_RETFUNC_DEPTH 50 +#define FTRACE_RETSTACK_ALLOC_SIZE 32 +/* Type of the callback handlers for tracing function graph*/ +typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ +typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ + +extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, + trace_func_graph_ent_t entryfunc); + +extern void ftrace_graph_stop(void); + +/* The current handlers in use */ +extern trace_func_graph_ret_t ftrace_graph_return; +extern trace_func_graph_ent_t ftrace_graph_entry; + +extern void unregister_ftrace_graph(void); + +extern void ftrace_graph_init_task(struct task_struct *t); +extern void ftrace_graph_exit_task(struct task_struct *t); + +static inline int task_curr_ret_stack(struct task_struct *t) +{ + return t->curr_ret_stack; +} + +static inline void pause_graph_tracing(void) +{ + atomic_inc(¤t->tracing_graph_pause); +} + +static inline void unpause_graph_tracing(void) +{ + atomic_dec(¤t->tracing_graph_pause); +} #else -static inline void trace_boot(struct boot_trace *it, initcall_t fn) { } -static inline void start_boot_trace(void) { } -static inline void stop_boot_trace(void) { } + +#define __notrace_funcgraph +#define __irq_entry + +static inline void ftrace_graph_init_task(struct task_struct *t) { } +static inline void ftrace_graph_exit_task(struct task_struct *t) { } + +static inline int task_curr_ret_stack(struct task_struct *tsk) +{ + return -1; +} + +static inline void pause_graph_tracing(void) { } +static inline void unpause_graph_tracing(void) { } #endif +#ifdef CONFIG_TRACING +#include <linux/sched.h> + +/* flags for current->trace */ +enum { + TSK_TRACE_FL_TRACE_BIT = 0, + TSK_TRACE_FL_GRAPH_BIT = 1, +}; +enum { + TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, + TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, +}; + +static inline void set_tsk_trace_trace(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline void clear_tsk_trace_trace(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace); +} + +static inline int test_tsk_trace_trace(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_TRACE; +} + +static inline void set_tsk_trace_graph(struct task_struct *tsk) +{ + set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline void clear_tsk_trace_graph(struct task_struct *tsk) +{ + clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace); +} + +static inline int test_tsk_trace_graph(struct task_struct *tsk) +{ + return tsk->trace & TSK_TRACE_FL_GRAPH; +} +#endif /* CONFIG_TRACING */ #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h new file mode 100644 index 00000000000..366a054d0b0 --- /dev/null +++ b/include/linux/ftrace_irq.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_FTRACE_IRQ_H +#define _LINUX_FTRACE_IRQ_H + + +#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) +extern void ftrace_nmi_enter(void); +extern void ftrace_nmi_exit(void); +#else +static inline void ftrace_nmi_enter(void) { } +static inline void ftrace_nmi_exit(void) { } +#endif + +#endif /* _LINUX_FTRACE_IRQ_H */ diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 350fe9767bb..162e5defe68 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -1,6 +1,6 @@ /* FUSE: 
Filesystem in Userspace - Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu> + Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. @@ -20,29 +20,27 @@ * * 7.10 * - add nonseekable open flag + * + * 7.11 + * - add IOCTL message + * - add unsolicited notification support + * - add POLL message and NOTIFY_POLL notification */ #ifndef _LINUX_FUSE_H #define _LINUX_FUSE_H -#include <asm/types.h> -#include <linux/major.h> +#include <linux/types.h> /** Version number of this interface */ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 10 +#define FUSE_KERNEL_MINOR_VERSION 11 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 -/** The major number of the fuse character device */ -#define FUSE_MAJOR MISC_MAJOR - -/** The minor number of the fuse character device */ -#define FUSE_MINOR 229 - /* Make sure all structures are padded to 64bit boundary, so 32bit userspace works under 64bit kernels */ @@ -151,6 +149,28 @@ struct fuse_file_lock { */ #define FUSE_READ_LOCKOWNER (1 << 1) +/** + * Ioctl flags + * + * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine + * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed + * FUSE_IOCTL_RETRY: retry with new iovecs + * + * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs + */ +#define FUSE_IOCTL_COMPAT (1 << 0) +#define FUSE_IOCTL_UNRESTRICTED (1 << 1) +#define FUSE_IOCTL_RETRY (1 << 2) + +#define FUSE_IOCTL_MAX_IOV 256 + +/** + * Poll flags + * + * FUSE_POLL_SCHEDULE_NOTIFY: request poll notify + */ +#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -188,6 +208,13 @@ enum fuse_opcode { FUSE_INTERRUPT = 36, FUSE_BMAP = 37, FUSE_DESTROY = 38, + FUSE_IOCTL = 39, + FUSE_POLL = 40, +}; + +enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_CODE_MAX, }; /* The read buffer is required to be at least 8k, but may be much larger */ @@ -388,6 +415,38 @@ struct fuse_bmap_out { __u64 block; }; +struct fuse_ioctl_in { + __u64 fh; + __u32 flags; + __u32 cmd; + __u64 arg; + __u32 in_size; + __u32 out_size; +}; + +struct fuse_ioctl_out { + __s32 result; + __u32 flags; + __u32 in_iovs; + __u32 out_iovs; +}; + +struct fuse_poll_in { + __u64 fh; + __u64 kh; + __u32 flags; + __u32 padding; +}; + +struct fuse_poll_out { + __u32 revents; + __u32 padding; +}; + +struct fuse_notify_poll_wakeup_out { + __u64 kh; +}; + struct fuse_in_header { __u32 len; __u32 opcode; diff --git a/include/linux/futex.h b/include/linux/futex.h index 586ab56a3ec..3bf5bb5a34f 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -25,7 +25,8 @@ union ktime; #define FUTEX_WAKE_BITSET 10 #define FUTEX_PRIVATE_FLAG 128 -#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG +#define FUTEX_CLOCK_REALTIME 256 +#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) #define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) @@ -164,6 +165,8 @@ union futex_key { } both; }; +#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } } + #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); extern void exit_pi_state_list(struct task_struct *curr); diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h index 4cc91393981..fadff28505b 100644 --- a/include/linux/generic_serial.h +++ b/include/linux/generic_serial.h @@ 
-21,7 +21,6 @@ struct real_driver { void (*enable_tx_interrupts) (void *); void (*disable_rx_interrupts) (void *); void (*enable_rx_interrupts) (void *); - int (*get_CD) (void *); void (*shutdown_port) (void*); int (*set_real_termios) (void*); int (*chars_in_buffer) (void*); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 3df7742ce24..16948eaecae 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter { struct disk_part_tbl { struct rcu_head rcu_head; int len; + struct hd_struct *last_lookup; struct hd_struct *part[]; }; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index e8003afeffb..dd20cd78faa 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -69,12 +69,6 @@ struct vm_area_struct; #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ __GFP_HARDWALL | __GFP_HIGHMEM | \ __GFP_MOVABLE) -#define GFP_NOFS_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_MOVABLE) -#define GFP_USER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ - __GFP_HARDWALL | __GFP_MOVABLE) -#define GFP_HIGHUSER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ - __GFP_HARDWALL | __GFP_HIGHMEM | \ - __GFP_MOVABLE) #ifdef CONFIG_NUMA #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index ec6ecd74781..1289fa7623c 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -15,6 +15,7 @@ struct gpio_keys_button { struct gpio_keys_platform_data { struct gpio_keys_button *buttons; int nbuttons; + unsigned int rep:1; /* enable input subsystem auto repeat */ }; #endif diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 181006cc94a..f83288347dd 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -4,6 +4,7 @@ #include <linux/preempt.h> #include <linux/smp_lock.h> #include <linux/lockdep.h> +#include <linux/ftrace_irq.h> #include <asm/hardirq.h> #include <asm/system.h> @@ -118,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif -#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ) +#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) extern void rcu_irq_enter(void); extern void rcu_irq_exit(void); +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); #else # define rcu_irq_enter() do { } while (0) # define rcu_irq_exit() do { } while (0) -#endif /* CONFIG_PREEMPT_RCU */ +# define rcu_nmi_enter() do { } while (0) +# define rcu_nmi_exit() do { } while (0) +#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */ /* * It is safe to do non-atomic ops on ->hardirq_context, @@ -134,7 +139,6 @@ extern void rcu_irq_exit(void); */ #define __irq_enter() \ do { \ - rcu_irq_enter(); \ account_system_vtime(current); \ add_preempt_count(HARDIRQ_OFFSET); \ trace_hardirq_enter(); \ @@ -153,7 +157,6 @@ extern void irq_enter(void); trace_hardirq_exit(); \ account_system_vtime(current); \ sub_preempt_count(HARDIRQ_OFFSET); \ - rcu_irq_exit(); \ } while (0) /* @@ -161,7 +164,20 @@ extern void irq_enter(void); */ extern void irq_exit(void); -#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0) -#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) +#define nmi_enter() \ + do { \ + ftrace_nmi_enter(); \ + lockdep_off(); \ + rcu_nmi_enter(); \ + __irq_enter(); \ + } while (0) + +#define nmi_exit() \ + do { \ + __irq_exit(); \ + rcu_nmi_exit(); \ + lockdep_on(); \ + ftrace_nmi_exit(); \ + } while (0) #endif /* LINUX_HARDIRQ_H */ diff 
--git a/include/linux/hdlc.h b/include/linux/hdlc.h index c59769693be..fd47a151665 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -43,7 +43,7 @@ struct hdlc_proto { }; -/* Pointed to by dev->priv */ +/* Pointed to by netdev_priv(dev) */ typedef struct hdlc_device { /* used by HDLC layer to take control over HDLC device from hw driver*/ int (*attach)(struct net_device *dev, @@ -80,7 +80,7 @@ struct net_device *alloc_hdlcdev(void *priv); static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev) { - return dev->priv; + return netdev_priv(dev); } static __inline__ void debug_frame(const struct sk_buff *skb) diff --git a/include/linux/hid.h b/include/linux/hid.h index e5780f8c934..81aa84d60c6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -403,15 +403,6 @@ struct hid_output_fifo { #define HID_STAT_ADDED 1 #define HID_STAT_PARSED 2 -#define HID_CTRL_RUNNING 1 -#define HID_OUT_RUNNING 2 -#define HID_IN_RUNNING 3 -#define HID_RESET_PENDING 4 -#define HID_SUSPENDED 5 -#define HID_CLEAR_HALT 6 -#define HID_DISCONNECTED 7 -#define HID_STARTED 8 - struct hid_input { struct list_head list; struct hid_report *report; @@ -540,6 +531,8 @@ struct hid_usage_id { * @name: driver name (e.g. "Footech_bar-wheel") * @id_table: which devices is this driver for (must be non-NULL for probe * to be called) + * @dyn_list: list of dynamically added device ids + * @dyn_lock: lock protecting @dyn_list * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) @@ -567,6 +560,9 @@ struct hid_driver { char *name; const struct hid_device_id *id_table; + struct list_head dyn_list; + spinlock_t dyn_lock; + int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); @@ -797,6 +793,8 @@ dbg_hid(const char *fmt, ...) 
#ifdef CONFIG_HID_COMPAT #define HID_COMPAT_LOAD_DRIVER(name) \ +/* prototype to avoid sparse warning */ \ +extern void hid_compat_##name(void); \ void hid_compat_##name(void) { } \ EXPORT_SYMBOL(hid_compat_##name) #else diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h index dbb5c8c374f..dd8d6926917 100644 --- a/include/linux/hidraw.h +++ b/include/linux/hidraw.h @@ -33,6 +33,8 @@ struct hidraw_devinfo { #define HIDIOCGRDESCSIZE _IOR('H', 0x01, int) #define HIDIOCGRDESC _IOR('H', 0x02, struct hidraw_report_descriptor) #define HIDIOCGRAWINFO _IOR('H', 0x03, struct hidraw_devinfo) +#define HIDIOCGRAWNAME(len) _IOC(_IOC_READ, 'H', 0x04, len) +#define HIDIOCGRAWPHYS(len) _IOC(_IOC_READ, 'H', 0x05, len) #define HIDRAW_FIRST_MINOR 0 #define HIDRAW_MAX_DEVICES 64 diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index bab303dafd6..f148e490841 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h @@ -32,7 +32,9 @@ struct hippi_cb { }; extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); - +extern int hippi_change_mtu(struct net_device *dev, int new_mtu); +extern int hippi_mac_addr(struct net_device *dev, void *p); +extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p); extern struct net_device *alloc_hippi_dev(int sizeof_priv); #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3eba43878dc..bd37078c2d7 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -43,26 +43,6 @@ enum hrtimer_restart { }; /* - * hrtimer callback modes: - * - * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context - * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context - * Special mode for tick emulation and - * scheduler timer. Such timers are per - * cpu and not allowed to be migrated on - * cpu unplug. 
- * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context - * with timer->base lock unlocked - * used for timers which call wakeup to - * avoid lock order problems with rq->lock - */ -enum hrtimer_cb_mode { - HRTIMER_CB_SOFTIRQ, - HRTIMER_CB_IRQSAFE_PERCPU, - HRTIMER_CB_IRQSAFE_UNLOCKED, -}; - -/* * Values to track state of the timer * * Possible states: @@ -70,7 +50,6 @@ enum hrtimer_cb_mode { * 0x00 inactive * 0x01 enqueued into rbtree * 0x02 callback function running - * 0x04 callback pending (high resolution mode) * * Special cases: * 0x03 callback function running and enqueued @@ -92,8 +71,7 @@ enum hrtimer_cb_mode { #define HRTIMER_STATE_INACTIVE 0x00 #define HRTIMER_STATE_ENQUEUED 0x01 #define HRTIMER_STATE_CALLBACK 0x02 -#define HRTIMER_STATE_PENDING 0x04 -#define HRTIMER_STATE_MIGRATE 0x08 +#define HRTIMER_STATE_MIGRATE 0x04 /** * struct hrtimer - the basic hrtimer structure @@ -109,8 +87,6 @@ enum hrtimer_cb_mode { * @function: timer expiry callback function * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) - * @cb_mode: high resolution timer feature to select the callback execution - * mode * @cb_entry: list head to enqueue an expired timer into the callback list * @start_site: timer statistics field to store the site where the timer * was started @@ -129,7 +105,6 @@ struct hrtimer { struct hrtimer_clock_base *base; unsigned long state; struct list_head cb_entry; - enum hrtimer_cb_mode cb_mode; #ifdef CONFIG_TIMER_STATS int start_pid; void *start_site; @@ -188,15 +163,11 @@ struct hrtimer_clock_base { * @check_clocks: Indictator, when set evaluate time source and clock * event devices whether high resolution mode can be * activated. - * @cb_pending: Expired timers are moved from the rbtree to this - * list in the timer interrupt. The list is processed - * in the softirq. 
* @nr_events: Total number of timer interrupt events */ struct hrtimer_cpu_base { spinlock_t lock; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - struct list_head cb_pending; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; int hres_active; @@ -404,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer) */ static inline int hrtimer_is_queued(struct hrtimer *timer) { - return timer->state & - (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING); + return timer->state & HRTIMER_STATE_ENQUEUED; } /* diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e1c8afc002c..f1d2fba19ea 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -233,6 +233,10 @@ static inline unsigned long huge_page_size(struct hstate *h) return (unsigned long)PAGE_SIZE << h->order; } +extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); + +extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); + static inline unsigned long huge_page_mask(struct hstate *h) { return h->mask; @@ -273,6 +277,8 @@ struct hstate {}; #define hstate_inode(i) NULL #define huge_page_size(h) PAGE_SIZE #define huge_page_mask(h) PAGE_MASK +#define vma_kernel_pagesize(v) PAGE_SIZE +#define vma_mmu_pagesize(v) PAGE_SIZE #define huge_page_order(h) 0 #define huge_page_shift(h) PAGE_SHIFT static inline unsigned int pages_per_huge_page(struct hstate *h) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 33a5992d493..20873d40246 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -393,11 +393,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) #define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */ #define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */ #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ -#define I2C_CLASS_CAM_ANALOG (1<<4) /* camera with analog CCD */ -#define I2C_CLASS_CAM_DIGITAL (1<<5) /* most webcams */ -#define I2C_CLASS_SOUND (1<<6) /* sound devices */ #define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ -#define I2C_CLASS_ALL (UINT_MAX) /* all of the above */ /* i2c_client_address_data is the struct for holding default client * addresses for a driver and for the parameters supplied on the diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h new file mode 100644 index 00000000000..372470350fa --- /dev/null +++ b/include/linux/i2c/dm355evm_msp.h @@ -0,0 +1,79 @@ +/* + * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board + */ +#ifndef __LINUX_I2C_DM355EVM_MSP +#define __LINUX_I2C_DM355EVM_MSP + +/* + * Written against Spectrum's writeup for the A4 firmware revision, + * and tweaked to match source and rev D2 schematics by removing CPLD + * and NOR flash hooks (which were last appropriate in rev B boards). + * + * Note that the firmware supports a flavor of write posting ... to be + * sure a write completes, issue another read or write. 
+ */ + +/* utilities to access "registers" emulated by msp430 firmware */ +extern int dm355evm_msp_write(u8 value, u8 reg); +extern int dm355evm_msp_read(u8 reg); + + +/* command/control registers */ +#define DM355EVM_MSP_COMMAND 0x00 +# define MSP_COMMAND_NULL 0 +# define MSP_COMMAND_RESET_COLD 1 +# define MSP_COMMAND_RESET_WARM 2 +# define MSP_COMMAND_RESET_WARM_I 3 +# define MSP_COMMAND_POWEROFF 4 +# define MSP_COMMAND_IR_REINIT 5 +#define DM355EVM_MSP_STATUS 0x01 +# define MSP_STATUS_BAD_OFFSET BIT(0) +# define MSP_STATUS_BAD_COMMAND BIT(1) +# define MSP_STATUS_POWER_ERROR BIT(2) +# define MSP_STATUS_RXBUF_OVERRUN BIT(3) +#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */ +# define MSP_RESET_DC5 BIT(0) +# define MSP_RESET_TVP5154 BIT(2) +# define MSP_RESET_IMAGER BIT(3) +# define MSP_RESET_ETHERNET BIT(4) +# define MSP_RESET_SYS BIT(5) +# define MSP_RESET_AIC33 BIT(7) + +/* GPIO registers ... bit patterns mostly match the source MSP ports */ +#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */ +#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */ +# define MSP_SWITCH1_SW6_1 BIT(0) +# define MSP_SWITCH1_SW6_2 BIT(1) +# define MSP_SWITCH1_SW6_3 BIT(2) +# define MSP_SWITCH1_SW6_4 BIT(3) +# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */ +# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */ +#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */ +# define MSP_SWITCH2_SW10 BIT(3) +# define MSP_SWITCH2_SW11 BIT(4) +# define MSP_SWITCH2_SW12 BIT(5) +# define MSP_SWITCH2_SW13 BIT(6) +# define MSP_SWITCH2_SW14 BIT(7) +#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */ +# define MSP_SDMMC_0_WP BIT(1) +# define MSP_SDMMC_0_CD BIT(2) /* active low */ +# define MSP_SDMMC_1_WP BIT(3) +# define MSP_SDMMC_1_CD BIT(4) /* active low */ +#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */ +#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */ +# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */ + +/* power supply registers are currently omitted */ + +/* RTC registers */ +#define DM355EVM_MSP_RTC_0 0x12 /* LSB */ +#define DM355EVM_MSP_RTC_1 0x13 +#define DM355EVM_MSP_RTC_2 0x14 +#define DM355EVM_MSP_RTC_3 0x15 /* MSB */ + +/* input event queue registers; code == ((HIGH << 8) | LOW) */ +#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */ +#define DM355EVM_MSP_INPUT_HIGH 0x17 +#define DM355EVM_MSP_INPUT_LOW 0x18 + +#endif /* __LINUX_I2C_DM355EVM_MSP */ diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h new file mode 100644 index 00000000000..c6361fbb7bf --- /dev/null +++ b/include/linux/i2c/tsc2007.h @@ -0,0 +1,17 @@ +#ifndef __LINUX_I2C_TSC2007_H +#define __LINUX_I2C_TSC2007_H + +/* linux/i2c/tsc2007.h */ + +struct tsc2007_platform_data { + u16 model; /* 2007. */ + u16 x_plate_ohms; + + int (*get_pendown_state)(void); + void (*clear_penirq)(void); /* If needed, clear 2nd level + interrupt source */ + int (*init_platform_hw)(void); + void (*exit_platform_hw)(void); +}; + +#endif diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index fb604dcd38f..8137f660a5c 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -78,8 +78,8 @@ int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1 * for the value, and populate your data starting at offset 1. 
*/ -int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes); -int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes); +int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); +int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ @@ -234,6 +234,9 @@ struct twl4030_gpio_platform_data { /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */ u8 mmc_cd; + /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */ + u32 debounce; + /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup * should be enabled. Else, if that bit is set in "pulldowns", * that pulldown is enabled. Don't waste power by letting any @@ -278,6 +281,18 @@ struct twl4030_platform_data { struct twl4030_keypad_data *keypad; struct twl4030_usb_data *usb; + /* LDO regulators */ + struct regulator_init_data *vdac; + struct regulator_init_data *vpll1; + struct regulator_init_data *vpll2; + struct regulator_init_data *vmmc1; + struct regulator_init_data *vmmc2; + struct regulator_init_data *vsim; + struct regulator_init_data *vaux1; + struct regulator_init_data *vaux2; + struct regulator_init_data *vaux3; + struct regulator_init_data *vaux4; + /* REVISIT more to come ... _nothing_ should be hard-wired */ }; @@ -285,33 +300,6 @@ struct twl4030_platform_data { int twl4030_sih_setup(int module); -/* - * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the - * IRQ data to subsidiary devices using platform device resources. - */ - -/* IRQ information-need base */ -#include <mach/irqs.h> -/* TWL4030 interrupts */ - -/* #define TWL4030_MODIRQ_GPIO (TWL4030_IRQ_BASE + 0) */ -#define TWL4030_MODIRQ_KEYPAD (TWL4030_IRQ_BASE + 1) -#define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2) -#define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3) -/* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */ -/* #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) */ - -#define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0) -/* #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) */ -/* #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) */ -/* #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) */ -/* #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) */ -/* #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) */ -/* #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) */ -/* #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) */ - -/* Rest are unsued currently*/ - /* Offsets to Power Registers */ #define TWL4030_VDAC_DEV_GRP 0x3B #define TWL4030_VDAC_DEDICATED 0x3E @@ -322,16 +310,6 @@ int twl4030_sih_setup(int module); #define TWL4030_VAUX3_DEV_GRP 0x1F #define TWL4030_VAUX3_DEDICATED 0x22 -/* TWL4030 GPIO interrupt definitions */ - -#define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n)) - -/* - * Exported TWL4030 GPIO APIs - * - * WARNING -- use standard GPIO and IRQ calls instead; these will vanish. - */ -int twl4030_set_gpio_debounce(int gpio, int enable); #if defined(CONFIG_TWL4030_BCI_BATTERY) || \ defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) @@ -340,4 +318,38 @@ int twl4030_set_gpio_debounce(int gpio, int enable); static inline int twl4030charger_usb_en(int enable) { return 0; } #endif +/*----------------------------------------------------------------------*/ + +/* Linux-specific regulator identifiers ... for now, we only support + * the LDOs, and leave the three buck converters alone. 
VDD1 and VDD2 + * need to tie into hardware based voltage scaling (cpufreq etc), while + * VIO is generally fixed. + */ + +/* EXTERNAL dc-to-dc buck converters */ +#define TWL4030_REG_VDD1 0 +#define TWL4030_REG_VDD2 1 +#define TWL4030_REG_VIO 2 + +/* EXTERNAL LDOs */ +#define TWL4030_REG_VDAC 3 +#define TWL4030_REG_VPLL1 4 +#define TWL4030_REG_VPLL2 5 /* not on all chips */ +#define TWL4030_REG_VMMC1 6 +#define TWL4030_REG_VMMC2 7 /* not on all chips */ +#define TWL4030_REG_VSIM 8 /* not on all chips */ +#define TWL4030_REG_VAUX1 9 /* not on all chips */ +#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */ +#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */ +#define TWL4030_REG_VAUX3 12 /* not on all chips */ +#define TWL4030_REG_VAUX4 13 /* not on all chips */ + +/* INTERNAL LDOs */ +#define TWL4030_REG_VINTANA1 14 +#define TWL4030_REG_VINTANA2 15 +#define TWL4030_REG_VINTDIG 16 +#define TWL4030_REG_VUSB1V5 17 +#define TWL4030_REG_VUSB1V8 18 +#define TWL4030_REG_VUSB3V1 19 + #endif /* End of __TWL4030_H */ diff --git a/include/linux/ide.h b/include/linux/ide.h index 010fb26a157..3644f632338 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -33,24 +33,13 @@ #endif /* - * Used to indicate "no IRQ", should be a value that cannot be an IRQ - * number. - */ - -#define IDE_NO_IRQ (-1) - -typedef unsigned char byte; /* used everywhere */ - -/* * Probably not wise to fiddle with these */ +#define IDE_DEFAULT_MAX_FAILURES 1 #define ERROR_MAX 8 /* Max read/write errors per sector */ #define ERROR_RESET 3 /* Reset controller every 4th retry */ #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ -#define HWIF(drive) ((ide_hwif_t *)((drive)->hwif)) -#define HWGROUP(drive) ((ide_hwgroup_t *)(HWIF(drive)->hwgroup)) - /* * Definitions for accessing IDE controller registers */ @@ -122,8 +111,6 @@ struct ide_io_ports { #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ #define SECTOR_SIZE 512 -#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t))) - /* * Timeouts for various operations: */ @@ -172,9 +159,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *); enum { ide_unknown, ide_generic, ide_pci, ide_cmd640, ide_dtc2278, ide_ali14xx, ide_qd65xx, ide_umc8672, ide_ht6560b, - ide_rz1000, ide_trm290, - ide_cmd646, ide_cy82c693, ide_4drives, - ide_pmac, ide_acorn, + ide_4drives, ide_pmac, ide_acorn, ide_au1xxx, ide_palm3710 }; @@ -196,9 +181,6 @@ typedef struct hw_regs_s { unsigned long config; } hw_regs_t; -void ide_init_port_data(struct hwif_s *, unsigned int); -void ide_init_port_hw(struct hwif_s *, hw_regs_t *); - static inline void ide_std_init_ports(hw_regs_t *hw, unsigned long io_addr, unsigned long ctl_addr) @@ -407,6 +389,7 @@ enum { * This is used for several packet commands (not for READ/WRITE commands). */ #define IDE_PC_BUFFER_SIZE 256 +#define ATAPI_WAIT_PC (60 * HZ) struct ide_atapi_pc { /* actual packet bytes */ @@ -443,18 +426,14 @@ struct ide_atapi_pc { struct idetape_bh *bh; char *b_data; - /* idescsi only for now */ struct scatterlist *sg; unsigned int sg_cnt; - struct scsi_cmnd *scsi_cmd; - void (*done) (struct scsi_cmnd *); - unsigned long timeout; }; struct ide_devset; -struct ide_driver_s; +struct ide_driver; #ifdef CONFIG_BLK_DEV_IDEACPI struct ide_acpi_drive_link; @@ -484,55 +463,53 @@ enum { /* ide-cd */ /* Drive cannot eject the disc. */ - IDE_AFLAG_NO_EJECT = (1 << 3), + IDE_AFLAG_NO_EJECT = (1 << 1), /* Drive is a pre ATAPI 1.2 drive. 
*/ - IDE_AFLAG_PRE_ATAPI12 = (1 << 4), + IDE_AFLAG_PRE_ATAPI12 = (1 << 2), /* TOC addresses are in BCD. */ - IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), + IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), /* TOC track numbers are in BCD. */ - IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), + IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), /* * Drive does not provide data in multiples of SECTOR_SIZE * when more than one interrupt is needed. */ - IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), - /* Seeking in progress. */ - IDE_AFLAG_SEEKING = (1 << 8), + IDE_AFLAG_LIMIT_NFRAMES = (1 << 5), /* Saved TOC information is current. */ - IDE_AFLAG_TOC_VALID = (1 << 9), + IDE_AFLAG_TOC_VALID = (1 << 6), /* We think that the drive door is locked. */ - IDE_AFLAG_DOOR_LOCKED = (1 << 10), + IDE_AFLAG_DOOR_LOCKED = (1 << 7), /* SET_CD_SPEED command is unsupported. */ - IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), - IDE_AFLAG_VERTOS_300_SSD = (1 << 12), - IDE_AFLAG_VERTOS_600_ESD = (1 << 13), - IDE_AFLAG_SANYO_3CD = (1 << 14), - IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), - IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), - IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), + IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), + IDE_AFLAG_VERTOS_300_SSD = (1 << 9), + IDE_AFLAG_VERTOS_600_ESD = (1 << 10), + IDE_AFLAG_SANYO_3CD = (1 << 11), + IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), + IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), + IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), /* ide-floppy */ /* Avoid commands not supported in Clik drive */ - IDE_AFLAG_CLIK_DRIVE = (1 << 19), + IDE_AFLAG_CLIK_DRIVE = (1 << 15), /* Requires BH algorithm for packets */ - IDE_AFLAG_ZIP_DRIVE = (1 << 20), + IDE_AFLAG_ZIP_DRIVE = (1 << 16), /* Supports format progress report */ - IDE_AFLAG_SRFP = (1 << 22), + IDE_AFLAG_SRFP = (1 << 17), /* ide-tape */ - IDE_AFLAG_IGNORE_DSC = (1 << 23), + IDE_AFLAG_IGNORE_DSC = (1 << 18), /* 0 When the tape position is unknown */ - IDE_AFLAG_ADDRESS_VALID = (1 << 24), + IDE_AFLAG_ADDRESS_VALID = (1 << 19), /* Device already opened */ - IDE_AFLAG_BUSY = (1 << 25), + IDE_AFLAG_BUSY = (1 << 20), /* Attempt to auto-detect the current user block size */ - IDE_AFLAG_DETECT_BS = (1 << 26), + IDE_AFLAG_DETECT_BS = (1 << 21), /* Currently on a filemark */ - IDE_AFLAG_FILEMARK = (1 << 27), + IDE_AFLAG_FILEMARK = (1 << 22), /* 0 = no tape is loaded, so we don't rewind after ejecting */ - IDE_AFLAG_MEDIUM_PRESENT = (1 << 28), + IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), - IDE_AFLAG_NO_AUTOCLOSE = (1 << 29), + IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), }; /* device flags */ @@ -571,28 +548,26 @@ enum { IDE_DFLAG_NODMA = (1 << 16), /* powermanagment told us not to do anything, so sleep nicely */ IDE_DFLAG_BLOCKED = (1 << 17), - /* ide-scsi emulation */ - IDE_DFLAG_SCSI = (1 << 18), /* sleeping & sleep field valid */ - IDE_DFLAG_SLEEPING = (1 << 19), - IDE_DFLAG_POST_RESET = (1 << 20), - IDE_DFLAG_UDMA33_WARNED = (1 << 21), - IDE_DFLAG_LBA48 = (1 << 22), + IDE_DFLAG_SLEEPING = (1 << 18), + IDE_DFLAG_POST_RESET = (1 << 19), + IDE_DFLAG_UDMA33_WARNED = (1 << 20), + IDE_DFLAG_LBA48 = (1 << 21), /* status of write cache */ - IDE_DFLAG_WCACHE = (1 << 23), + IDE_DFLAG_WCACHE = (1 << 22), /* used for ignoring ATA_DF */ - IDE_DFLAG_NOWERR = (1 << 24), + IDE_DFLAG_NOWERR = (1 << 23), /* retrying in PIO */ - IDE_DFLAG_DMA_PIO_RETRY = (1 << 25), - IDE_DFLAG_LBA = (1 << 26), + IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), + IDE_DFLAG_LBA = (1 << 25), /* don't unload heads */ - IDE_DFLAG_NO_UNLOAD = (1 << 27), + IDE_DFLAG_NO_UNLOAD = (1 << 26), /* heads unloaded, please don't reset port */ - IDE_DFLAG_PARKED = (1 << 28), - IDE_DFLAG_MEDIA_CHANGED = (1 << 
29), + IDE_DFLAG_PARKED = (1 << 27), + IDE_DFLAG_MEDIA_CHANGED = (1 << 28), /* write protect */ - IDE_DFLAG_WP = (1 << 30), - IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 31), + IDE_DFLAG_WP = (1 << 29), + IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), }; struct ide_drive_s { @@ -602,7 +577,6 @@ struct ide_drive_s { struct request_queue *queue; /* request queue */ struct request *rq; /* current request */ - struct ide_drive_s *next; /* circular list of hwgroup drives */ void *driver_data; /* extra driver data */ u16 *id; /* identification info */ #ifdef CONFIG_IDE_PROC_FS @@ -616,8 +590,6 @@ struct ide_drive_s { unsigned long dev_flags; unsigned long sleep; /* sleep until this time */ - unsigned long service_start; /* time we started last request */ - unsigned long service_time; /* service time of last request */ unsigned long timeout; /* max time to wait for irq */ special_t special; /* special action flags */ @@ -678,6 +650,8 @@ struct ide_drive_s { int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, unsigned int, int); + ide_startstop_t (*irq_handler)(struct ide_drive_s *); + unsigned long atapi_flags; struct ide_atapi_pc request_sense_pc; @@ -700,7 +674,6 @@ struct ide_tp_ops { void (*exec_command)(struct hwif_s *, u8); u8 (*read_status)(struct hwif_s *); u8 (*read_altstatus)(struct hwif_s *); - u8 (*read_sff_dma_status)(struct hwif_s *); void (*set_irq)(struct hwif_s *, int); @@ -761,14 +734,17 @@ struct ide_dma_ops { int (*dma_test_irq)(struct ide_drive_s *); void (*dma_lost_irq)(struct ide_drive_s *); void (*dma_timeout)(struct ide_drive_s *); + /* + * The following method is optional and only required to be + * implemented for the SFF-8038i compatible controllers. + */ + u8 (*dma_sff_read_status)(struct hwif_s *); }; struct ide_host; typedef struct hwif_s { - struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ struct hwif_s *mate; /* other hwif from same PCI chip */ - struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */ struct ide_host *host; @@ -779,7 +755,7 @@ typedef struct hwif_s { unsigned long sata_scr[SATA_NR_PORTS]; - ide_drive_t drives[MAX_DRIVES]; /* drive info */ + ide_drive_t *devices[MAX_DRIVES + 1]; u8 major; /* our major number */ u8 index; /* 0 for ide0; 1 for ide1; ... 
*/ @@ -845,9 +821,7 @@ typedef struct hwif_s { unsigned extra_ports; /* number of extra dma ports */ unsigned present : 1; /* this interface exists */ - unsigned serialized : 1; /* serialized all channel operation */ - unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ - unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ + unsigned busy : 1; /* serializes devices on a port */ struct device gendev; struct device *portdev; @@ -859,19 +833,49 @@ typedef struct hwif_s { #ifdef CONFIG_BLK_DEV_IDEACPI struct ide_acpi_hwif_link *acpidata; #endif + + /* IRQ handler, if active */ + ide_startstop_t (*handler)(ide_drive_t *); + + /* BOOL: polling active & poll_timeout field valid */ + unsigned int polling : 1; + + /* current drive */ + ide_drive_t *cur_dev; + + /* current request */ + struct request *rq; + + /* failsafe timer */ + struct timer_list timer; + /* timeout value during long polls */ + unsigned long poll_timeout; + /* queried upon timeouts */ + int (*expiry)(ide_drive_t *); + + int req_gen; + int req_gen_timer; + + spinlock_t lock; } ____cacheline_internodealigned_in_smp ide_hwif_t; #define MAX_HOST_PORTS 4 struct ide_host { - ide_hwif_t *ports[MAX_HOST_PORTS]; + ide_hwif_t *ports[MAX_HOST_PORTS + 1]; unsigned int n_ports; struct device *dev[2]; unsigned int (*init_chipset)(struct pci_dev *); unsigned long host_flags; void *host_priv; + ide_hwif_t *cur_port; /* for hosts requiring serialization */ + + /* used for hosts requiring serialization */ + volatile long host_busy; }; +#define IDE_HOST_BUSY 0 + /* * internal ide interrupt handler type */ @@ -881,38 +885,6 @@ typedef int (ide_expiry_t)(ide_drive_t *); /* used by ide-cd, ide-floppy, etc. */ typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned); -typedef struct hwgroup_s { - /* irq handler, if active */ - ide_startstop_t (*handler)(ide_drive_t *); - - /* BOOL: protects all fields below */ - volatile int busy; - /* BOOL: wake us up on timer expiry */ - unsigned int sleeping : 1; - /* BOOL: polling active & poll_timeout field valid */ - unsigned int polling : 1; - - /* current drive */ - ide_drive_t *drive; - /* ptr to current hwif in linked-list */ - ide_hwif_t *hwif; - - /* current request */ - struct request *rq; - - /* failsafe timer */ - struct timer_list timer; - /* timeout value during long polls */ - unsigned long poll_timeout; - /* queried upon timeouts */ - int (*expiry)(ide_drive_t *); - - int req_gen; - int req_gen_timer; -} ide_hwgroup_t; - -typedef struct ide_driver_s ide_driver_t; - extern struct mutex ide_setting_mtx; /* @@ -1038,8 +1010,8 @@ void ide_proc_register_port(ide_hwif_t *); void ide_proc_port_register_devices(ide_hwif_t *); void ide_proc_unregister_device(ide_drive_t *); void ide_proc_unregister_port(ide_hwif_t *); -void ide_proc_register_driver(ide_drive_t *, ide_driver_t *); -void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *); +void ide_proc_register_driver(ide_drive_t *, struct ide_driver *); +void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *); read_proc_t proc_ide_read_capacity; read_proc_t proc_ide_read_geometry; @@ -1066,8 +1038,10 @@ static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; } static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; } static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; } static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; } -static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } -static inline void 
ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; } +static inline void ide_proc_register_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } +static inline void ide_proc_unregister_driver(ide_drive_t *drive, + struct ide_driver *driver) { ; } #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0; #endif @@ -1122,17 +1096,24 @@ enum { IDE_PM_COMPLETED, }; +int generic_ide_suspend(struct device *, pm_message_t); +int generic_ide_resume(struct device *); + +void ide_complete_power_step(ide_drive_t *, struct request *); +ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); +void ide_complete_pm_request(ide_drive_t *, struct request *); +void ide_check_pm_state(ide_drive_t *, struct request *); + /* * Subdrivers support. * * The gendriver.owner field should be set to the module owner of this driver. * The gendriver.name field should be set to the name of this driver */ -struct ide_driver_s { +struct ide_driver { const char *version; ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); int (*end_request)(ide_drive_t *, int, int); - ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8); struct device_driver gen_driver; int (*probe)(ide_drive_t *); void (*remove)(ide_drive_t *); @@ -1144,7 +1125,7 @@ struct ide_driver_s { #endif }; -#define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver) +#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver) int ide_device_get(ide_drive_t *); void ide_device_put(ide_drive_t *); @@ -1176,9 +1157,7 @@ void ide_execute_pkt_cmd(ide_drive_t *); void ide_pad_transfer(ide_drive_t *, int, int); -ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); - -ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat); +ide_startstop_t ide_error(ide_drive_t *, const char *, u8); void ide_fix_driveid(u16 *); @@ -1202,7 +1181,6 @@ void ide_tf_dump(const char *, struct ide_taskfile *); void ide_exec_command(ide_hwif_t *, u8); u8 ide_read_status(ide_hwif_t *); u8 ide_read_altstatus(ide_hwif_t *); -u8 ide_read_sff_dma_status(ide_hwif_t *); void ide_set_irq(ide_hwif_t *, int); @@ -1256,14 +1234,11 @@ int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); void ide_retry_pc(ide_drive_t *, struct gendisk *); -static inline unsigned long ide_scsi_get_timeout(struct ide_atapi_pc *pc) -{ - return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies); -} +int ide_cd_expiry(ide_drive_t *); -int ide_scsi_expiry(ide_drive_t *); +int ide_cd_get_xferlen(struct request *); -ide_startstop_t ide_issue_pc(ide_drive_t *, unsigned int, ide_expiry_t *); +ide_startstop_t ide_issue_pc(ide_drive_t *); ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); @@ -1320,11 +1295,11 @@ static inline int ide_hwif_setup_dma(ide_hwif_t *hwif, } #endif -typedef struct ide_pci_enablebit_s { +struct ide_pci_enablebit { u8 reg; /* byte pci reg holding the enable-bit */ u8 mask; /* mask to isolate the enable-bit */ u8 val; /* value of masked reg when "enabled" */ -} ide_pci_enablebit_t; +}; enum { /* Uses ISA control ports not PCI ones. 
*/ @@ -1376,8 +1351,8 @@ enum { IDE_HFLAG_LEGACY_IRQS = (1 << 21), /* force use of legacy IRQs */ IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), - /* limit LBA48 requests to 256 sectors */ - IDE_HFLAG_RQSIZE_256 = (1 << 23), + /* host is TRM290 */ + IDE_HFLAG_TRM290 = (1 << 23), /* use 32-bit I/O ops */ IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ @@ -1413,8 +1388,12 @@ struct ide_port_info { const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; - ide_pci_enablebit_t enablebits[2]; + struct ide_pci_enablebit enablebits[2]; + hwif_chipset_t chipset; + + u16 max_sectors; /* if < than the default one */ + u32 host_flags; u8 pio_mask; u8 swdma_mask; @@ -1482,6 +1461,7 @@ void ide_dma_exec_cmd(ide_drive_t *, u8); extern void ide_dma_start(ide_drive_t *); int ide_dma_end(ide_drive_t *); int ide_dma_test_irq(ide_drive_t *); +u8 ide_dma_sff_read_status(ide_hwif_t *); extern const struct ide_dma_ops sff_dma_ops; #else static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } @@ -1519,15 +1499,13 @@ static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} #endif -void ide_remove_port_from_hwgroup(ide_hwif_t *); -void ide_unregister(ide_hwif_t *); - void ide_register_region(struct gendisk *); void ide_unregister_region(struct gendisk *); void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); +int ide_sysfs_register_port(ide_hwif_t *); struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); void ide_host_free(struct ide_host *); @@ -1605,23 +1583,9 @@ static inline void ide_set_max_pio(ide_drive_t *drive) ide_set_pio(drive, 255); } -extern spinlock_t ide_lock; -extern struct mutex ide_cfg_mtx; -/* - * Structure locking: - * - * ide_cfg_mtx and ide_lock together protect changes to - * ide_hwif_t->{next,hwgroup} - * ide_drive_t->next - * - * ide_hwgroup_t->busy: ide_lock - * ide_hwgroup_t->hwif: ide_lock - * ide_hwif_t->mate: constant, no locking - * ide_drive_t->hwif: constant, no locking - */ - -#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) +char *ide_media_string(ide_drive_t *); +extern struct device_attribute ide_dev_attrs[]; extern struct bus_type ide_bus_type; extern struct class *ide_port_class; @@ -1637,8 +1601,15 @@ static inline int hwif_to_node(ide_hwif_t *hwif) static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) { - ide_drive_t *peer = &drive->hwif->drives[(drive->dn ^ 1) & 1]; + ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1]; return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL; } + +#define ide_port_for_each_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) + +#define ide_host_for_each_port(i, port, host) \ + for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) + #endif /* _IDE_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 14126bc3664..c4e6ca1a630 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -12,8 +12,8 @@ * published by the Free Software Foundation. 
*/ -#ifndef IEEE80211_H -#define IEEE80211_H +#ifndef LINUX_IEEE80211_H +#define LINUX_IEEE80211_H #include <linux/types.h> #include <asm/byteorder.h> @@ -97,7 +97,10 @@ #define IEEE80211_MAX_FRAME_LEN 2352 #define IEEE80211_MAX_SSID_LEN 32 + #define IEEE80211_MAX_MESH_ID_LEN 32 +#define IEEE80211_MESH_CONFIG_LEN 19 + #define IEEE80211_QOS_CTL_LEN 2 #define IEEE80211_QOS_CTL_TID_MASK 0x000F #define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 @@ -666,6 +669,13 @@ struct ieee80211_cts { u8 ra[6]; } __attribute__ ((packed)); +struct ieee80211_pspoll { + __le16 frame_control; + __le16 aid; + u8 bssid[6]; + u8 ta[6]; +} __attribute__ ((packed)); + /** * struct ieee80211_bar - HT Block Ack Request * @@ -685,28 +695,88 @@ struct ieee80211_bar { #define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 #define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 + +#define IEEE80211_HT_MCS_MASK_LEN 10 + +/** + * struct ieee80211_mcs_info - MCS information + * @rx_mask: RX mask + * @rx_highest: highest supported RX rate + * @tx_params: TX parameters + */ +struct ieee80211_mcs_info { + u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN]; + __le16 rx_highest; + u8 tx_params; + u8 reserved[3]; +} __attribute__((packed)); + +/* 802.11n HT capability MSC set */ +#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff +#define IEEE80211_HT_MCS_TX_DEFINED 0x01 +#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02 +/* value 0 == 1 stream etc */ +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C +#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2 +#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4 +#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10 + +/* + * 802.11n D5.0 20.3.5 / 20.6 says: + * - indices 0 to 7 and 32 are single spatial stream + * - 8 to 31 are multiple spatial streams using equal modulation + * [8..15 for two streams, 16..23 for three and 24..31 for four] + * - remainder are multiple spatial streams using unequal modulation + */ +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33 +#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \ + (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8) + /** * struct ieee80211_ht_cap - HT capabilities * - * This structure refers to "HT capabilities element" as - * described in 802.11n draft section 7.3.2.52 + * This structure is the "HT capabilities element" as + * described in 802.11n D5.0 7.3.2.57 */ struct ieee80211_ht_cap { __le16 cap_info; u8 ampdu_params_info; - u8 supp_mcs_set[16]; + + /* 16 bytes MCS information */ + struct ieee80211_mcs_info mcs; + __le16 extended_ht_cap_info; __le32 tx_BF_cap_info; u8 antenna_selection_info; } __attribute__ ((packed)); +/* 802.11n HT capabilities masks (for cap_info) */ +#define IEEE80211_HT_CAP_LDPC_CODING 0x0001 +#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002 +#define IEEE80211_HT_CAP_SM_PS 0x000C +#define IEEE80211_HT_CAP_GRN_FLD 0x0010 +#define IEEE80211_HT_CAP_SGI_20 0x0020 +#define IEEE80211_HT_CAP_SGI_40 0x0040 +#define IEEE80211_HT_CAP_TX_STBC 0x0080 +#define IEEE80211_HT_CAP_RX_STBC 0x0300 +#define IEEE80211_HT_CAP_DELAY_BA 0x0400 +#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 +#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000 +#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 +#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 + +/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */ +#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 +#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C + /** - * struct ieee80211_ht_cap - HT additional information + * struct ieee80211_ht_info - HT information * - * This structure refers to "HT information 
element" as - * described in 802.11n draft section 7.3.2.53 + * This structure is the "HT information element" as + * described in 802.11n D5.0 7.3.2.58 */ -struct ieee80211_ht_addt_info { +struct ieee80211_ht_info { u8 control_chan; u8 ht_param; __le16 operation_mode; @@ -714,36 +784,33 @@ struct ieee80211_ht_addt_info { u8 basic_set[16]; } __attribute__ ((packed)); -/* 802.11n HT capabilities masks */ -#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 -#define IEEE80211_HT_CAP_SM_PS 0x000C -#define IEEE80211_HT_CAP_GRN_FLD 0x0010 -#define IEEE80211_HT_CAP_SGI_20 0x0020 -#define IEEE80211_HT_CAP_SGI_40 0x0040 -#define IEEE80211_HT_CAP_DELAY_BA 0x0400 -#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 -#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 -/* 802.11n HT capability AMPDU settings */ -#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 -#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C -/* 802.11n HT capability MSC set */ -#define IEEE80211_SUPP_MCS_SET_UEQM 4 -#define IEEE80211_HT_CAP_MAX_STREAMS 4 -#define IEEE80211_SUPP_MCS_SET_LEN 10 -/* maximum streams the spec allows */ -#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01 -#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02 -#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C -#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10 -/* 802.11n HT IE masks */ -#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03 -#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00 -#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01 -#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03 -#define IEEE80211_HT_IE_CHA_WIDTH 0x04 -#define IEEE80211_HT_IE_HT_PROTECTION 0x0003 -#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 -#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 +/* for ht_param */ +#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03 +#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00 +#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01 +#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 +#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 +#define IEEE80211_HT_PARAM_RIFS_MODE 0x08 +#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10 +#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0 + +/* for operation_mode */ +#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1 +#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2 +#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3 +#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004 +#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010 + +/* for stbc_param */ +#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040 +#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080 +#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100 +#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200 +#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400 +#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800 + /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 @@ -769,7 +836,6 @@ struct ieee80211_ht_addt_info { /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 -#define WLAN_AUTH_FAST_BSS_TRANSITION 2 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 @@ -949,7 +1015,7 @@ enum ieee80211_eid { WLAN_EID_EXT_SUPP_RATES = 50, /* 802.11n */ WLAN_EID_HT_CAPABILITY = 45, - WLAN_EID_HT_EXTRA_INFO = 61, + WLAN_EID_HT_INFORMATION = 61, /* 802.11i */ WLAN_EID_RSN = 48, WLAN_EID_WPA = 221, @@ -976,6 +1042,68 @@ enum ieee80211_spectrum_mgmt_actioncode { WLAN_ACTION_SPCT_CHL_SWITCH = 4, }; +/* + * IEEE 802.11-2007 7.3.2.9 Country information element + * + * Minimum length is 8 octets, ie len must be evenly + * divisible by 2 + 
*/ + +/* Although the spec says 8 I'm seeing 6 in practice */ +#define IEEE80211_COUNTRY_IE_MIN_LEN 6 + +/* + * For regulatory extension stuff see IEEE 802.11-2007 + * Annex I (page 1141) and Annex J (page 1147). Also + * review 7.3.2.9. + * + * When dot11RegulatoryClassesRequired is true and the + * first_channel/reg_extension_id is >= 201 then the IE + * compromises of the 'ext' struct represented below: + * + * - Regulatory extension ID - when generating IE this just needs + * to be monotonically increasing for each triplet passed in + * the IE + * - Regulatory class - index into set of rules + * - Coverage class - index into air propagation time (Table 7-27), + * in microseconds, you can compute the air propagation time from + * the index by multiplying by 3, so index 10 yields a propagation + * of 10 us. Valid values are 0-31, values 32-255 are not defined + * yet. A value of 0 inicates air propagation of <= 1 us. + * + * See also Table I.2 for Emission limit sets and table + * I.3 for Behavior limit sets. Table J.1 indicates how to map + * a reg_class to an emission limit set and behavior limit set. + */ +#define IEEE80211_COUNTRY_EXTENSION_ID 201 + +/* + * Channels numbers in the IE must be monotonically increasing + * if dot11RegulatoryClassesRequired is not true. + * + * If dot11RegulatoryClassesRequired is true consecutive + * subband triplets following a regulatory triplet shall + * have monotonically increasing first_channel number fields. + * + * Channel numbers shall not overlap. + * + * Note that max_power is signed. + */ +struct ieee80211_country_ie_triplet { + union { + struct { + u8 first_channel; + u8 num_channels; + s8 max_power; + } __attribute__ ((packed)) chans; + struct { + u8 reg_extension_id; + u8 reg_class; + u8 coverage_class; + } __attribute__ ((packed)) ext; + }; +} __attribute__ ((packed)); + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -1057,4 +1185,4 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) return hdr->addr1; } -#endif /* IEEE80211_H */ +#endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if.h b/include/linux/if.h index 65246846c84..2a6e29620a9 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -65,6 +65,7 @@ #define IFF_BONDING 0x20 /* bonding master or slave */ #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ +#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 4d3401812e6..5ff89809a58 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -87,6 +87,9 @@ #define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#define ARPHRD_PHONET 820 /* PhoNet media type */ +#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ + #define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ #define ARPHRD_NONE 0xFFFE /* zero header length */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a5cb0c3f6dc..f8ff918c208 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -115,6 +115,11 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling); extern int vlan_hwaccel_do_receive(struct sk_buff *skb); +extern int 
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb); +extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, + unsigned int vlan_tci, + struct napi_gro_fraginfo *info); #else static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) @@ -140,6 +145,20 @@ static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) { return 0; } + +static inline int vlan_gro_receive(struct napi_struct *napi, + struct vlan_group *grp, + unsigned int vlan_tci, struct sk_buff *skb) +{ + return NET_RX_DROP; +} + +static inline int vlan_gro_frags(struct napi_struct *napi, + struct vlan_group *grp, unsigned int vlan_tci, + struct napi_gro_fraginfo *info) +{ + return NET_RX_DROP; +} #endif /** diff --git a/include/linux/in.h b/include/linux/in.h index db458beef19..d60122a3a08 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -80,6 +80,10 @@ struct in_addr { /* BSD compatibility */ #define IP_RECVRETOPTS IP_RETOPTS +/* TProxy original addresses */ +#define IP_ORIGDSTADDR 20 +#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR + /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ #define IP_PMTUDISC_WANT 1 /* Use per route hints */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 23fd8909b9e..2f3c2d4ef73 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,6 +12,7 @@ #include <net/net_namespace.h> extern struct files_struct init_files; +extern struct fs_struct init_fs; #define INIT_KIOCTX(name, which_mm) \ { \ @@ -57,7 +58,6 @@ extern struct nsproxy init_nsproxy; .mnt_ns = NULL, \ INIT_NET_NS(net_ns) \ INIT_IPC_NS(ipc_ns) \ - .user_ns = &init_user_ns, \ } #define INIT_SIGHAND(sighand) { \ @@ -113,6 +113,8 @@ extern struct group_info init_groups; # define CAP_INIT_BSET CAP_INIT_EFF_SET #endif +extern struct cred init_cred; + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -147,13 +149,10 @@ extern struct group_info init_groups; .children = LIST_HEAD_INIT(tsk.children), \ .sibling = LIST_HEAD_INIT(tsk.sibling), \ .group_leader = &tsk, \ - .group_info = &init_groups, \ - .cap_effective = CAP_INIT_EFF_SET, \ - .cap_inheritable = CAP_INIT_INH_SET, \ - .cap_permitted = CAP_FULL_SET, \ - .cap_bset = CAP_INIT_BSET, \ - .securebits = SECUREBITS_DEFAULT, \ - .user = INIT_USER, \ + .real_cred = &init_cred, \ + .cred = &init_cred, \ + .cred_exec_mutex = \ + __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ .comm = "swapper", \ .thread = INIT_THREAD, \ .fs = &init_fs, \ diff --git a/include/linux/input.h b/include/linux/input.h index 5341e8251f8..9a6355f74db 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -659,6 +659,8 @@ struct input_absinfo { #define SW_RADIO SW_RFKILL_ALL /* deprecated */ #define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ #define SW_DOCK 0x05 /* set = plugged into dock */ +#define SW_LINEOUT_INSERT 0x06 /* set = inserted */ +#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */ #define SW_MAX 0x0f #define SW_CNT (SW_MAX+1) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3d017cfd245..c4f6c101dbc 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -23,8 +23,6 @@ #define _INTEL_IOMMU_H_ #include <linux/types.h> -#include <linux/msi.h> -#include <linux/sysdev.h> #include <linux/iova.h> #include <linux/io.h> #include <linux/dma_remapping.h> @@ -289,10 +287,10 @@ struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 cap; u64 ecap; - int seg; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ + int agaw; /* agaw of this iommu */ #ifdef CONFIG_DMAR unsigned long *domain_ids; /* bitmap of domains */ @@ -302,8 +300,6 @@ struct intel_iommu { unsigned int irq; unsigned char name[7]; /* Device Name */ - struct msi_msg saved_msg; - struct sys_device sysdev; struct iommu_flush flush; #endif struct q_inval *qi; /* Queued invalidation info */ @@ -334,25 +330,6 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); -void intel_iommu_domain_exit(struct dmar_domain *domain); -struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev); -int intel_iommu_context_mapping(struct dmar_domain *domain, - struct pci_dev *pdev); -int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova, - u64 hpa, size_t size, int prot); -void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn); -struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev); -u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova); - -#ifdef CONFIG_DMAR -int intel_iommu_found(void); -#else /* CONFIG_DMAR */ -static inline int intel_iommu_found(void) -{ - return 0; -} -#endif /* CONFIG_DMAR */ - extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f58a0cf8929..9127f6b51a3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -14,6 +14,7 @@ #include <linux/irqflags.h> #include <linux/smp.h> #include <linux/percpu.h> + #include 
<asm/atomic.h> #include <asm/ptrace.h> #include <asm/system.h> @@ -107,15 +108,15 @@ extern void enable_irq(unsigned int irq); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) -extern cpumask_t irq_default_affinity; +extern cpumask_var_t irq_default_affinity; -extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); +extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); extern int irq_can_set_affinity(unsigned int irq); extern int irq_select_affinity(unsigned int irq); #else /* CONFIG_SMP */ -static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) +static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) { return -EINVAL; } @@ -251,10 +252,8 @@ enum BLOCK_SOFTIRQ, TASKLET_SOFTIRQ, SCHED_SOFTIRQ, -#ifdef CONFIG_HIGH_RES_TIMERS HRTIMER_SOFTIRQ, -#endif - RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ + RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ NR_SOFTIRQS }; @@ -465,4 +464,10 @@ static inline void init_irq_proc(void) int show_interrupts(struct seq_file *p, void *v); +struct irq_desc; + +extern int early_irq_init(void); +extern int arch_early_irq_init(void); +extern int arch_init_chip_data(struct irq_desc *desc, int cpu); + #endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h new file mode 100644 index 00000000000..8a7bfb1b6ca --- /dev/null +++ b/include/linux/iommu.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_IOMMU_H +#define __LINUX_IOMMU_H + +#define IOMMU_READ (1) +#define IOMMU_WRITE (2) + +struct device; + +struct iommu_domain { + void *priv; +}; + +struct iommu_ops { + int (*domain_init)(struct iommu_domain *domain); + void (*domain_destroy)(struct iommu_domain *domain); + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + void (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size); + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + unsigned long iova); +}; + +#ifdef CONFIG_IOMMU_API + +extern void register_iommu(struct iommu_ops *ops); +extern bool iommu_found(void); +extern struct iommu_domain *iommu_domain_alloc(void); +extern void iommu_domain_free(struct iommu_domain *domain); +extern int iommu_attach_device(struct iommu_domain *domain, + struct device *dev); +extern void iommu_detach_device(struct iommu_domain *domain, + struct device *dev); +extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, + size_t size); +extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova); + +#else /* CONFIG_IOMMU_API */ + +static inline void register_iommu(struct iommu_ops *ops) +{ +} + +static inline bool iommu_found(void) +{ + return false; +} + +static inline struct iommu_domain *iommu_domain_alloc(void) +{ + return NULL; +} + +static inline void iommu_domain_free(struct iommu_domain *domain) +{ +} + +static inline int iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + return -ENODEV; +} + +static inline void iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ +} + +static inline int iommu_map_range(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, + size_t size, int prot) +{ + return -ENODEV; +} + +static inline void iommu_unmap_range(struct iommu_domain *domain, + unsigned long iova, size_t size) +{ +} + +static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + return 0; +} + +#endif /* CONFIG_IOMMU_API */ + +#endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 041e95aac2b..f6bb2ca8e3b 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -49,6 +49,7 @@ struct resource_list { #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ +#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_UNSET 0x20000000 #define IORESOURCE_AUTO 0x40000000 @@ -133,13 +134,16 @@ static inline unsigned long resource_type(struct resource *res) } /* Convenience shorthand with allocation */ -#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) -#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) +#define request_region(start,n,name) __request_region(&ioport_resource, (start), 
(n), (name), 0) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) #define rename_region(region, newname) do { (region)->name = (newname); } while (0) extern struct resource * __request_region(struct resource *, resource_size_t start, - resource_size_t n, const char *name); + resource_size_t n, const char *name, int relaxed); /* Compatibility cruft */ #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) @@ -175,6 +179,7 @@ extern struct resource * __devm_request_region(struct device *dev, extern void __devm_release_region(struct device *dev, struct resource *parent, resource_size_t start, resource_size_t n); extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); +extern int iomem_is_exclusive(u64 addr); #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index f98a656b17e..76dad480884 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -86,4 +86,6 @@ static inline int task_nice_ioclass(struct task_struct *task) */ extern int ioprio_best(unsigned short aprio, unsigned short bprio); +extern int set_task_ioprio(struct task_struct *task, int ioprio); + #endif diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 641e026eee8..0b816cae533 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -278,6 +278,7 @@ struct ipv6_pinfo { struct in6_addr saddr; struct in6_addr rcv_saddr; struct in6_addr daddr; + struct in6_pktinfo sticky_pktinfo; struct in6_addr *daddr_cache; #ifdef CONFIG_IPV6_SUBTREES struct in6_addr *saddr_cache; diff --git a/include/linux/irq.h b/include/linux/irq.h index 3dddfa703eb..f899b502f18 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -113,7 +113,8 @@ struct irq_chip { void (*eoi)(unsigned int irq); void (*end)(unsigned int irq); - void (*set_affinity)(unsigned int irq, cpumask_t dest); + void (*set_affinity)(unsigned int irq, + const struct cpumask *dest); int (*retrigger)(unsigned int irq); int (*set_type)(unsigned int irq, unsigned int flow_type); int (*set_wake)(unsigned int irq, unsigned int on); @@ -129,9 +130,14 @@ struct irq_chip { const char *typename; }; +struct timer_rand_state; +struct irq_2_iommu; /** * struct irq_desc - interrupt descriptor * @irq: interrupt number for this descriptor + * @timer_rand_state: pointer to timer rand state struct + * @kstat_irqs: irq stats per cpu + * @irq_2_iommu: iommu with this irq * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] * @chip: low level interrupt hardware access * @msi_desc: MSI descriptor @@ -143,8 +149,8 @@ struct irq_chip { * @depth: disable-depth, for nested irq_disable() calls * @wake_depth: enable depth, for multiple set_irq_wake() callers * @irq_count: stats field to detect stalled irqs - * @irqs_unhandled: stats field for spurious unhandled interrupts * @last_unhandled: aging timer for unhandled count + * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP * @affinity: IRQ affinity on SMP * @cpu: cpu index useful for balancing @@ -154,6 +160,13 @@ struct irq_chip { */ struct irq_desc { unsigned int irq; +#ifdef CONFIG_SPARSE_IRQ + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; +# ifdef 
CONFIG_INTR_REMAP + struct irq_2_iommu *irq_2_iommu; +# endif +#endif irq_flow_handler_t handle_irq; struct irq_chip *chip; struct msi_desc *msi_desc; @@ -165,8 +178,8 @@ struct irq_desc { unsigned int depth; /* nested irq disables */ unsigned int wake_depth; /* nested wake enables */ unsigned int irq_count; /* For detecting broken IRQs */ - unsigned int irqs_unhandled; unsigned long last_unhandled; /* Aging timer for unhandled count */ + unsigned int irqs_unhandled; spinlock_t lock; #ifdef CONFIG_SMP cpumask_t affinity; @@ -181,12 +194,32 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; +extern void arch_init_copy_chip_data(struct irq_desc *old_desc, + struct irq_desc *desc, int cpu); +extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); +#ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; +#else /* CONFIG_SPARSE_IRQ */ +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); + +#define kstat_irqs_this_cpu(DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]) +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]++) -static inline struct irq_desc *irq_to_desc(unsigned int irq) +#endif /* CONFIG_SPARSE_IRQ */ + +extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); + +static inline struct irq_desc * +irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) { - return (irq < nr_irqs) ? irq_desc + irq : NULL; +#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC + return irq_to_desc(irq); +#else + return desc; +#endif } /* @@ -380,6 +413,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) +#define get_irq_desc_chip(desc) ((desc)->chip) +#define get_irq_desc_chip_data(desc) ((desc)->chip_data) +#define get_irq_desc_data(desc) ((desc)->handler_data) +#define get_irq_desc_msi(desc) ((desc)->msi_desc) + #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 452c280c811..86af92e9e84 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -1,24 +1,46 @@ #ifndef _LINUX_IRQNR_H #define _LINUX_IRQNR_H +/* + * Generic irq_desc iterators: + */ +#ifdef __KERNEL__ + #ifndef CONFIG_GENERIC_HARDIRQS #include <asm/irq.h> -# define nr_irqs NR_IRQS + +/* + * Wrappers for non-genirq architectures: + */ +#define nr_irqs NR_IRQS +#define irq_to_desc(irq) (&irq_desc[irq]) # define for_each_irq_desc(irq, desc) \ for (irq = 0; irq < nr_irqs; irq++) -#else + +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1; irq >= 0; irq--) +#else /* CONFIG_GENERIC_HARDIRQS */ + extern int nr_irqs; +extern struct irq_desc *irq_to_desc(unsigned int irq); + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ + irq++, desc = irq_to_desc(irq)) \ + if (desc) -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) # define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ - irq >= 0; irq--, desc--) -#endif + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ + irq--, desc = irq_to_desc(irq)) \ + if (desc) -#define for_each_irq_nr(irq) \ - for (irq = 0; irq < nr_irqs; irq++) +#endif /* CONFIG_GENERIC_HARDIRQS */ + +#define for_each_irq_nr(irq) \ + for (irq = 0; irq < nr_irqs; irq++) + +#endif /* __KERNEL__ */ #endif 
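[Editorial aside; not part of the patch.] The irqnr.h hunk above replaces direct indexing of the static irq_desc[] array with NULL-aware iterators built on irq_to_desc(), which matters once CONFIG_SPARSE_IRQ can leave holes in the descriptor space. A minimal sketch of a caller, assuming a genirq (CONFIG_GENERIC_HARDIRQS) kernel with this series applied; the function name count_spurious_sources is made up for illustration:

#include <linux/irq.h>
#include <linux/irqnr.h>

/* Walk every allocated descriptor; for_each_irq_desc() already skips
 * NULL entries on sparse-IRQ kernels, so no explicit check is needed. */
static unsigned int count_spurious_sources(void)
{
	struct irq_desc *desc;
	unsigned int irq, n = 0;

	for_each_irq_desc(irq, desc) {
		if (desc->irqs_unhandled)
			n++;
	}

	return n;
}

The same walk written against the old header would have stepped through irq_desc[0..nr_irqs) unconditionally; with the new wrappers the loop body only ever sees live descriptors.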
diff --git a/include/linux/istallion.h b/include/linux/istallion.h index 0d184072324..7faca98c7d1 100644 --- a/include/linux/istallion.h +++ b/include/linux/istallion.h @@ -59,9 +59,7 @@ struct stliport { unsigned int devnr; int baud_base; int custom_divisor; - int close_delay; int closing_wait; - int openwaitcnt; int rc; int argsize; void *argp; diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 346e2b80be7..6384b19efe6 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -543,6 +543,11 @@ struct transaction_s unsigned long t_expires; /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* * How many handles used this transaction? [t_handle_lock] */ int t_handle_count; @@ -798,9 +803,19 @@ struct journal_s struct buffer_head **j_wbuf; int j_wbufsize; + /* + * this is the pid of the last person to run a synchronous operation + * through the journal. + */ pid_t j_last_sync_writer; /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to the disk. [j_state_lock] + */ + u64 j_average_commit_time; + + /* * An opaque pointer to fs-private information. ext3 puts its * superblock pointer here */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index c7d106ef22e..b45109c61fb 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -329,6 +329,7 @@ enum jbd_state_bits { BH_State, /* Pins most journal_head state */ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ + BH_JBDPrivateStart, /* First bit available for private use by FS */ }; BUFFER_FNS(JBD, jbd) @@ -637,6 +638,11 @@ struct transaction_s unsigned long t_expires; /* + * When this transaction started, in nanoseconds [no locking] + */ + ktime_t t_start_time; + + /* * How many handles used this transaction? [t_handle_lock] */ int t_handle_count; @@ -681,6 +687,8 @@ jbd2_time_diff(unsigned long start, unsigned long end) return end + (MAX_JIFFY_OFFSET - start); } +#define JBD2_NR_BATCH 64 + /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. @@ -825,6 +833,14 @@ struct journal_s struct mutex j_checkpoint_mutex; /* + * List of buffer heads used by the checkpoint routine. This + * was moved from jbd2_log_do_checkpoint() to reduce stack + * usage. Access to this array is controlled by the + * j_checkpoint_mutex. [j_checkpoint_mutex] + */ + struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; + + /* * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ @@ -938,8 +954,26 @@ struct journal_s struct buffer_head **j_wbuf; int j_wbufsize; + /* + * this is the pid of hte last person to run a synchronous operation + * through the journal + */ pid_t j_last_sync_writer; + /* + * the average amount of time in nanoseconds it takes to commit a + * transaction to disk. 
[j_state_lock] + */ + u64 j_average_commit_time; + + /* + * minimum and maximum times that we should wait for + * additional filesystem operations to get batched into a + * synchronous handle in microseconds + */ + u32 j_min_batch_time; + u32 j_max_batch_time; + /* This function is called when a transaction is closed */ void (*j_commit_callback)(journal_t *, transaction_t *); @@ -1007,6 +1041,35 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal); int __jbd2_journal_remove_checkpoint(struct journal_head *); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + +/* + * Triggers + */ + +struct jbd2_buffer_trigger_type { + /* + * Fired just before a buffer is written to the journal. + * mapped_data is a mapped buffer that is the frozen data for + * commit. + */ + void (*t_commit)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh, void *mapped_data, + size_t size); + + /* + * Fired during journal abort for dirty buffers that will not be + * committed. + */ + void (*t_abort)(struct jbd2_buffer_trigger_type *type, + struct buffer_head *bh); +}; + +extern void jbd2_buffer_commit_trigger(struct journal_head *jh, + void *mapped_data, + struct jbd2_buffer_trigger_type *triggers); +extern void jbd2_buffer_abort_trigger(struct journal_head *jh, + struct jbd2_buffer_trigger_type *triggers); + /* Buffer IO */ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction, @@ -1045,6 +1108,8 @@ extern int jbd2_journal_extend (handle_t *, int nblocks); extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *); +void jbd2_journal_set_triggers(struct buffer_head *, + struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); @@ -1070,7 +1135,6 @@ extern int jbd2_journal_set_features (journal_t *, unsigned long, unsigned long, unsigned long); extern void jbd2_journal_clear_features (journal_t *, unsigned long, unsigned long, unsigned long); -extern int jbd2_journal_create (journal_t *); extern int jbd2_journal_load (journal_t *journal); extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); @@ -1145,8 +1209,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid); int jbd2_log_do_checkpoint(journal_t *journal); void __jbd2_log_wait_for_space(journal_t *journal); -extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); -extern int jbd2_cleanup_journal_tail(journal_t *); +extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); +extern int jbd2_cleanup_journal_tail(journal_t *); /* Debugging code only: */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index abb6ac639e8..1a9cf78bfce 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void) ((long)(a) - (long)(b) >= 0)) #define time_before_eq(a,b) time_after_eq(b,a) +/* + * Calculate whether a is in the range of [b, c]. + */ #define time_in_range(a,b,c) \ (time_after_eq(a,b) && \ time_before_eq(a,c)) +/* + * Calculate whether a is in the range of [b, c). 
+ */ +#define time_in_range_open(a,b,c) \ + (time_after_eq(a,b) && \ + time_before(a,c)) + /* Same as above, but does so with platform independent 64bit types. * These must be used when utilizing jiffies_64 (i.e. return value of * get_jiffies_64() */ diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h index bb70ebb6a2d..525aac3c97d 100644 --- a/include/linux/journal-head.h +++ b/include/linux/journal-head.h @@ -12,6 +12,8 @@ typedef unsigned int tid_t; /* Unique transaction ID */ typedef struct transaction_s transaction_t; /* Compound transaction type */ + + struct buffer_head; struct journal_head { @@ -87,6 +89,12 @@ struct journal_head { * [j_list_lock] */ struct journal_head *b_cpnext, *b_cpprev; + + /* Trigger type */ + struct jbd2_buffer_trigger_type *b_triggers; + + /* Trigger type for the committing transaction's frozen data */ + struct jbd2_buffer_trigger_type *b_frozen_triggers; }; #endif /* JOURNAL_HEAD_H_INCLUDED */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index dc7e0d0a647..343df9ef241 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -48,6 +48,12 @@ extern const char linux_proc_banner[]; #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(divisor) __divisor = divisor; \ + (((x) + ((__divisor) / 2)) / (__divisor)); \ +} \ +) #define _RET_IP_ (unsigned long)__builtin_return_address(0) #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) @@ -141,6 +147,15 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) +#ifdef CONFIG_PROVE_LOCKING +void might_fault(void); +#else +static inline void might_fault(void) +{ + might_sleep(); +} +#endif + extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) @@ -188,6 +203,8 @@ extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); +extern int func_ptr_is_kernel_text(void *ptr); + struct pid; extern struct pid *session_of_pgrp(struct pid *pgrp); @@ -338,13 +355,13 @@ static inline char *pack_hex_byte(char *buf, u8 byte) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG) +#if defined(DEBUG) +#define pr_debug(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG) #define pr_debug(fmt, ...) do { \ dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) -#elif defined(DEBUG) -#define pr_debug(fmt, ...) \ - printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug(fmt, ...) 
\ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) @@ -361,18 +378,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) ((unsigned char *)&addr)[3] #define NIPQUAD_FMT "%u.%u.%u.%u" -#define NIP6(addr) \ - ntohs((addr).s6_addr16[0]), \ - ntohs((addr).s6_addr16[1]), \ - ntohs((addr).s6_addr16[2]), \ - ntohs((addr).s6_addr16[3]), \ - ntohs((addr).s6_addr16[4]), \ - ntohs((addr).s6_addr16[5]), \ - ntohs((addr).s6_addr16[6]), \ - ntohs((addr).s6_addr16[7]) -#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" -#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x" - #if defined(__LITTLE_ENDIAN) #define HIPQUAD(addr) \ ((unsigned char *)&addr)[3], \ @@ -471,6 +476,12 @@ static inline char *pack_hex_byte(char *buf, u8 byte) __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) + +/* + * swap - swap value of @a and @b + */ +#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; }) + /** * container_of - cast a member of a structure out to the containing structure * @ptr: the pointer to the member. diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 4a145caeee0..570d2041311 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -28,7 +28,9 @@ struct cpu_usage_stat { struct kernel_stat { struct cpu_usage_stat cpustat; - unsigned int irqs[NR_IRQS]; +#ifndef CONFIG_SPARSE_IRQ + unsigned int irqs[NR_IRQS]; +#endif }; DECLARE_PER_CPU(struct kernel_stat, kstat); @@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); extern unsigned long long nr_context_switches(void); +#ifndef CONFIG_SPARSE_IRQ +#define kstat_irqs_this_cpu(irq) \ + (kstat_this_cpu.irqs[irq]) + struct irq_desc; static inline void kstat_incr_irqs_this_cpu(unsigned int irq, @@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, { kstat_this_cpu.irqs[irq]++; } +#endif + +#ifndef CONFIG_SPARSE_IRQ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { return kstat_cpu(cpu).irqs[irq]; } +#else +extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); +#endif /* * Number of interrupts per specific IRQ source, since bootup @@ -67,10 +79,13 @@ static inline unsigned int kstat_irqs(unsigned int irq) } extern unsigned long long task_delta_exec(struct task_struct *); -extern void account_user_time(struct task_struct *, cputime_t); -extern void account_user_time_scaled(struct task_struct *, cputime_t); -extern void account_system_time(struct task_struct *, int, cputime_t); -extern void account_system_time_scaled(struct task_struct *, cputime_t); -extern void account_steal_time(struct task_struct *, cputime_t); +extern void account_user_time(struct task_struct *, cputime_t, cputime_t); +extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); +extern void account_steal_time(cputime_t); +extern void account_idle_time(cputime_t); + +extern void account_process_tick(struct task_struct *, int user); +extern void account_steal_ticks(unsigned long ticks); +extern void account_idle_ticks(unsigned long ticks); #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 17f76fc0517..adc34f2c6ef 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -100,6 +100,10 @@ struct kimage { #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 unsigned int preserve_context : 1; + +#ifdef ARCH_HAS_KIMAGE_ARCH + struct kimage_arch arch; +#endif }; diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h deleted file mode 100644 
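[Editorial aside; not part of the patch.] The kernel.h hunks above add DIV_ROUND_CLOSEST() and a type-generic swap() helper. A small stand-alone GCC illustration of what they compute, with the macro bodies copied from the hunks; the surrounding program is made up for demonstration:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor)(			\
{							\
	typeof(divisor) __divisor = divisor;		\
	(((x) + ((__divisor) / 2)) / (__divisor));	\
}							\
)

#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; })

int main(void)
{
	int a = 3, b = 8;

	/* 7/2 rounds up to 4 and 5/2 rounds up to 3: halves round away
	 * from zero for positive operands. */
	printf("%d %d\n", DIV_ROUND_CLOSEST(7, 2), DIV_ROUND_CLOSEST(5, 2));

	swap(a, b);
	printf("a=%d b=%d\n", a, b);	/* prints a=8 b=3 */
	return 0;
}

Both helpers rely on GNU statement expressions and typeof, so they are GCC/Clang-specific, exactly as in the kernel header.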
index e8b8a7a5c49..00000000000 --- a/include/linux/key-ui.h +++ /dev/null @@ -1,66 +0,0 @@ -/* key-ui.h: key userspace interface stuff - * - * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_KEY_UI_H -#define _LINUX_KEY_UI_H - -#include <linux/key.h> - -/* the key tree */ -extern struct rb_root key_serial_tree; -extern spinlock_t key_serial_lock; - -/* required permissions */ -#define KEY_VIEW 0x01 /* require permission to view attributes */ -#define KEY_READ 0x02 /* require permission to read content */ -#define KEY_WRITE 0x04 /* require permission to update / modify */ -#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */ -#define KEY_LINK 0x10 /* require permission to link */ -#define KEY_SETATTR 0x20 /* require permission to change attributes */ -#define KEY_ALL 0x3f /* all the above permissions */ - -/* - * the keyring payload contains a list of the keys to which the keyring is - * subscribed - */ -struct keyring_list { - struct rcu_head rcu; /* RCU deletion hook */ - unsigned short maxkeys; /* max keys this list can hold */ - unsigned short nkeys; /* number of keys currently held */ - unsigned short delkey; /* key to be unlinked by RCU */ - struct key *keys[0]; -}; - -/* - * check to see whether permission is granted to use a key in the desired way - */ -extern int key_task_permission(const key_ref_t key_ref, - struct task_struct *context, - key_perm_t perm); - -static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) -{ - return key_task_permission(key_ref, current, perm); -} - -extern key_ref_t lookup_user_key(struct task_struct *context, - key_serial_t id, int create, int partial, - key_perm_t perm); - -extern long join_session_keyring(const char *name); - -extern struct key_type *key_type_lookup(const char *type); -extern void key_type_put(struct key_type *ktype); - -#define key_negative_timeout 60 /* default timeout on a negative key's existence */ - - -#endif /* _LINUX_KEY_UI_H */ diff --git a/include/linux/key.h b/include/linux/key.h index 1b70e35a71e..21d32a142c0 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -73,6 +73,7 @@ struct key; struct seq_file; struct user_struct; struct signal_struct; +struct cred; struct key_type; struct key_owner; @@ -181,7 +182,7 @@ struct key { extern struct key *key_alloc(struct key_type *type, const char *desc, uid_t uid, gid_t gid, - struct task_struct *ctx, + const struct cred *cred, key_perm_t perm, unsigned long flags); @@ -249,7 +250,7 @@ extern int key_unlink(struct key *keyring, struct key *key); extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, - struct task_struct *ctx, + const struct cred *cred, unsigned long flags, struct key *dest); @@ -276,24 +277,11 @@ extern ctl_table key_sysctls[]; /* * the userspace interface */ -extern void switch_uid_keyring(struct user_struct *new_user); -extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk); -extern int copy_thread_group_keys(struct task_struct *tsk); -extern void exit_keys(struct task_struct *tsk); -extern void exit_thread_group_keys(struct signal_struct *tg); -extern int suid_keys(struct task_struct *tsk); -extern int exec_keys(struct 
task_struct *tsk); +extern int install_thread_keyring_to_cred(struct cred *cred); extern void key_fsuid_changed(struct task_struct *tsk); extern void key_fsgid_changed(struct task_struct *tsk); extern void key_init(void); -#define __install_session_keyring(tsk, keyring) \ -({ \ - struct key *old_session = tsk->signal->session_keyring; \ - tsk->signal->session_keyring = keyring; \ - old_session; \ -}) - #else /* CONFIG_KEYS */ #define key_validate(k) 0 @@ -302,17 +290,9 @@ extern void key_init(void); #define key_revoke(k) do { } while(0) #define key_put(k) do { } while(0) #define key_ref_put(k) do { } while(0) -#define make_key_ref(k, p) ({ NULL; }) -#define key_ref_to_ptr(k) ({ NULL; }) +#define make_key_ref(k, p) NULL +#define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define switch_uid_keyring(u) do { } while(0) -#define __install_session_keyring(t, k) ({ NULL; }) -#define copy_keys(f,t) 0 -#define copy_thread_group_keys(t) 0 -#define exit_keys(t) do { } while(0) -#define exit_thread_group_keys(tg) do { } while(0) -#define suid_keys(t) do { } while(0) -#define exec_keys(t) do { } while(0) #define key_fsuid_changed(t) do { } while(0) #define key_fsgid_changed(t) do { } while(0) #define key_init() do { } while(0) diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h index 656ee6b77a4..c0688eb7209 100644 --- a/include/linux/keyctl.h +++ b/include/linux/keyctl.h @@ -1,6 +1,6 @@ /* keyctl.h: keyctl command IDs * - * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2004, 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -20,6 +20,7 @@ #define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */ #define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */ #define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */ +#define KEY_SPEC_REQUESTOR_KEYRING -8 /* - key ID for request_key() dest keyring */ /* request-key default keyrings */ #define KEY_REQKEY_DEFL_NO_CHANGE -1 @@ -30,6 +31,7 @@ #define KEY_REQKEY_DEFL_USER_KEYRING 4 #define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5 #define KEY_REQKEY_DEFL_GROUP_KEYRING 6 +#define KEY_REQKEY_DEFL_REQUESTOR_KEYRING 7 /* keyctl commands */ #define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */ diff --git a/include/linux/klist.h b/include/linux/klist.h index 8ea98db223e..d5a27af9dba 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -13,7 +13,6 @@ #define _LINUX_KLIST_H #include <linux/spinlock.h> -#include <linux/completion.h> #include <linux/kref.h> #include <linux/list.h> @@ -41,7 +40,6 @@ struct klist_node { void *n_klist; /* never access directly */ struct list_head n_node; struct kref n_ref; - struct completion n_removed; }; extern void klist_add_tail(struct klist_node *n, struct klist *k); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 497b1d1f7a0..d6ea19e314b 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -69,9 +69,6 @@ struct kprobe { /* list of kprobes for multi-handler support */ struct list_head list; - /* Indicates that the corresponding module has been ref counted */ - unsigned int mod_refcounted; - /*count the number of times this probe was temporarily disarmed */ unsigned long nmissed; @@ -103,8 +100,19 @@ struct kprobe { /* copy of the original instruction */ struct arch_specific_insn ainsn; + + /* Indicates various status flags. Protected by kprobe_mutex. 
*/ + u32 flags; }; +/* Kprobe status flags */ +#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ + +static inline int kprobe_gone(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_GONE; +} + /* * Special probe type that uses setjmp-longjmp type tricks to resume * execution at a specified entry with a matching prototype corresponding @@ -201,7 +209,6 @@ static inline int init_test_probes(void) } #endif /* CONFIG_KPROBES_SANITY_TEST */ -extern struct mutex kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f18b86fa865..35525ac6333 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -83,6 +83,7 @@ struct kvm_irqchip { #define KVM_EXIT_S390_SIEIC 13 #define KVM_EXIT_S390_RESET 14 #define KVM_EXIT_DCR 15 +#define KVM_EXIT_NMI 16 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -387,6 +388,14 @@ struct kvm_trace_rec { #define KVM_CAP_DEVICE_ASSIGNMENT 17 #endif #define KVM_CAP_IOMMU 18 +#if defined(CONFIG_X86) +#define KVM_CAP_DEVICE_MSI 20 +#endif +/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ +#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 +#if defined(CONFIG_X86) +#define KVM_CAP_USER_NMI 22 +#endif /* * ioctls for VM fds @@ -458,6 +467,8 @@ struct kvm_trace_rec { #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) +/* Available with KVM_CAP_NMI */ +#define KVM_NMI _IO(KVMIO, 0x9a) #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) @@ -500,10 +511,17 @@ struct kvm_assigned_irq { __u32 guest_irq; __u32 flags; union { + struct { + __u32 addr_lo; + __u32 addr_hi; + __u32 data; + } guest_msi; __u32 reserved[12]; }; }; #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) +#define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) + #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bb92be2153b..ec49d0be7f5 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -16,6 +16,7 @@ #include <linux/mm.h> #include <linux/preempt.h> #include <linux/marker.h> +#include <linux/msi.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -306,9 +307,16 @@ struct kvm_assigned_dev_kernel { int host_busnr; int host_devfn; int host_irq; + bool host_irq_disabled; int guest_irq; - int irq_requested; + struct msi_msg guest_msi; +#define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) +#define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) +#define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) +#define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) + unsigned long irq_requested_type; int irq_source_id; + int flags; struct pci_dev *dev; struct kvm *kvm; }; @@ -316,18 +324,20 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); -void kvm_unregister_irq_ack_notifier(struct kvm *kvm, - struct kvm_irq_ack_notifier *kian); +void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); -#ifdef CONFIG_DMAR +#ifdef CONFIG_IOMMU_API int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); -int kvm_iommu_map_guest(struct kvm *kvm, - struct 
kvm_assigned_dev_kernel *assigned_dev); +int kvm_iommu_map_guest(struct kvm *kvm); int kvm_iommu_unmap_guest(struct kvm *kvm); -#else /* CONFIG_DMAR */ +int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev); +#else /* CONFIG_IOMMU_API */ static inline int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) @@ -335,9 +345,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm, return 0; } -static inline int kvm_iommu_map_guest(struct kvm *kvm, - struct kvm_assigned_dev_kernel - *assigned_dev) +static inline int kvm_iommu_map_guest(struct kvm *kvm) { return -ENODEV; } @@ -346,7 +354,19 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) { return 0; } -#endif /* CONFIG_DMAR */ + +static inline int kvm_assign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} + +static inline int kvm_deassign_device(struct kvm *kvm, + struct kvm_assigned_dev_kernel *assigned_dev) +{ + return 0; +} +#endif /* CONFIG_IOMMU_API */ static inline void kvm_guest_enter(void) { diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index e7217dc58f3..a53407a4165 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -54,9 +54,13 @@ struct lguest_vqconfig { /* Write command first word is a request. */ enum lguest_req { - LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ + LHREQ_INITIALIZE, /* + base, pfnlimit, start */ LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ }; + +/* The alignment to use between consumer and producer parts of vring. + * x86 pagesize for historical reasons. */ +#define LGUEST_VRING_ALIGN 4096 #endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/libata.h b/include/linux/libata.h index ed3f26eb5df..4f7c8fb4d3f 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -213,10 +213,11 @@ enum { ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ - ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ + ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ + ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ @@ -1285,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link) return ata_tag_valid(link->active_tag) || link->sactive; } -extern struct ata_link *__ata_port_next_link(struct ata_port *ap, - struct ata_link *link, - bool dev_only); +/* + * Iterators + * + * ATA_LITER_* constants are used to select link iteration mode and + * ATA_DITER_* device iteration mode. + * + * For a custom iteration directly using ata_{link|dev}_next(), if + * @link or @dev, respectively, is NULL, the first element is + * returned. @dev and @link can be any valid device or link and the + * next element according to the iteration mode will be returned. + * After the last element, NULL is returned. + */ +enum ata_link_iter_mode { + ATA_LITER_EDGE, /* if present, PMP links only; otherwise, + * host link. 
no slave link */ + ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ + ATA_LITER_PMP_FIRST, /* PMP links followed by host link, + * slave link still comes after host link */ +}; + +enum ata_dev_iter_mode { + ATA_DITER_ENABLED, + ATA_DITER_ENABLED_REVERSE, + ATA_DITER_ALL, + ATA_DITER_ALL_REVERSE, +}; -#define __ata_port_for_each_link(link, ap) \ - for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ - (link) = __ata_port_next_link((ap), (link), false)) +extern struct ata_link *ata_link_next(struct ata_link *link, + struct ata_port *ap, + enum ata_link_iter_mode mode); -#define ata_port_for_each_link(link, ap) \ - for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ - (link) = __ata_port_next_link((ap), (link), true)) +extern struct ata_device *ata_dev_next(struct ata_device *dev, + struct ata_link *link, + enum ata_dev_iter_mode mode); -#define ata_link_for_each_dev(dev, link) \ - for ((dev) = (link)->device; \ - (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \ - (dev)++) +/* + * Shortcut notation for iterations + * + * ata_for_each_link() iterates over each link of @ap according to + * @mode. @link points to the current link in the loop. @link is + * NULL after loop termination. ata_for_each_dev() works the same way + * except that it iterates over each device of @link. + * + * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be + * specified when using the following shorthand notations. Only the + * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be + * specified. This not only increases brevity but also makes it + * impossible to use ATA_LITER_* for device iteration or vice-versa. + */ +#define ata_for_each_link(link, ap, mode) \ + for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ + (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) -#define ata_link_for_each_dev_reverse(dev, link) \ - for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ - (dev) >= (link)->device || ((dev) = NULL); (dev)--) +#define ata_for_each_dev(dev, link, mode) \ + for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ + (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) /** * ata_ncq_enabled - Test whether NCQ is enabled @@ -1481,6 +1518,7 @@ extern void sata_pmp_error_handler(struct ata_port *ap); extern const struct ata_port_operations ata_sff_port_ops; extern const struct ata_port_operations ata_bmdma_port_ops; +extern const struct ata_port_operations ata_bmdma32_port_ops; /* PIO only, sg_tablesize and dma_boundary limits can be removed */ #define ATA_PIO_SHT(drv_name) \ @@ -1508,6 +1546,8 @@ extern void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); extern unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); +extern unsigned int ata_sff_data_xfer32(struct ata_device *dev, + unsigned char *buf, unsigned int buflen, int rw); extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw); extern u8 ata_sff_irq_on(struct ata_port *ap); diff --git a/include/linux/libps2.h b/include/linux/libps2.h index afc41336910..b94534b7e26 100644 --- a/include/linux/libps2.h +++ b/include/linux/libps2.h @@ -18,11 +18,13 @@ #define PS2_RET_ID 0x00 #define PS2_RET_ACK 0xfa #define PS2_RET_NAK 0xfe +#define PS2_RET_ERR 0xfc #define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */ #define PS2_FLAG_CMD 2 /* Waiting for command to finish */ #define PS2_FLAG_CMD1 4 /* Waiting 
for the first byte of command response */ #define PS2_FLAG_WAITID 8 /* Command execiting is GET ID */ +#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */ struct ps2dev { struct serio *serio; diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 9fd1f859021..fee9e59649c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -64,14 +64,6 @@ name: #endif -#define KPROBE_ENTRY(name) \ - .pushsection .kprobes.text, "ax"; \ - ENTRY(name) - -#define KPROBE_END(name) \ - END(name); \ - .popsection - #ifndef END #define END(name) \ .size name, .-name diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h new file mode 100644 index 00000000000..93150ecf3ea --- /dev/null +++ b/include/linux/list_nulls.h @@ -0,0 +1,94 @@ +#ifndef _LINUX_LIST_NULLS_H +#define _LINUX_LIST_NULLS_H + +/* + * Special version of lists, where end of list is not a NULL pointer, + * but a 'nulls' marker, which can have many different values. + * (up to 2^31 different values guaranteed on all platforms) + * + * In the standard hlist, termination of a list is the NULL pointer. + * In this special 'nulls' variant, we use the fact that objects stored in + * a list are aligned on a word (4 or 8 bytes alignment). + * We therefore use the last significant bit of 'ptr' : + * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1) + * Set to 0 : This is a pointer to some object (ptr) + */ + +struct hlist_nulls_head { + struct hlist_nulls_node *first; +}; + +struct hlist_nulls_node { + struct hlist_nulls_node *next, **pprev; +}; +#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \ + ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1))) + +#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) +/** + * ptr_is_a_nulls - Test if a ptr is a nulls + * @ptr: ptr to be tested + * + */ +static inline int is_a_nulls(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr & 1); +} + +/** + * get_nulls_value - Get the 'nulls' value of the end of chain + * @ptr: end of chain + * + * Should be called only if is_a_nulls(ptr); + */ +static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr) +{ + return ((unsigned long)ptr) >> 1; +} + +static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) +{ + return !h->pprev; +} + +static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) +{ + return is_a_nulls(h->first); +} + +static inline void __hlist_nulls_del(struct hlist_nulls_node *n) +{ + struct hlist_nulls_node *next = n->next; + struct hlist_nulls_node **pprev = n->pprev; + *pprev = next; + if (!is_a_nulls(next)) + next->pprev = pprev; +} + +/** + * hlist_nulls_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + * + */ +#define hlist_nulls_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ * + */ +#define hlist_nulls_for_each_entry_from(tpos, pos, member) \ + for (; (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +#endif diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index e5872dc994c..fbc48f89852 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -41,6 +41,7 @@ struct nlmclnt_initdata { size_t addrlen; unsigned short protocol; u32 nfs_version; + int noresvport; }; /* diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index b56d5aa9b19..aa6fe7026de 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -43,12 +43,13 @@ struct nlm_host { struct sockaddr_storage h_addr; /* peer address */ size_t h_addrlen; struct sockaddr_storage h_srcaddr; /* our address (optional) */ - struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ - char * h_name; /* remote hostname */ + struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */ + char *h_name; /* remote hostname */ u32 h_version; /* interface version */ unsigned short h_proto; /* transport proto */ unsigned short h_reclaiming : 1, h_server : 1, /* server side, not client side */ + h_noresvport : 1, h_inuse : 1; wait_queue_head_t h_gracewait; /* wait while reclaiming */ struct rw_semaphore h_rwsem; /* Reboot recovery lock */ @@ -63,21 +64,29 @@ struct nlm_host { spinlock_t h_lock; struct list_head h_granted; /* Locks in GRANTED state */ struct list_head h_reclaim; /* Locks in RECLAIM state */ - struct nsm_handle * h_nsmhandle; /* NSM status handle */ - - char h_addrbuf[48], /* address eyecatchers */ - h_srcaddrbuf[48]; + struct nsm_handle *h_nsmhandle; /* NSM status handle */ + char *h_addrbuf; /* address eyecatcher */ }; +/* + * The largest string sm_addrbuf should hold is a full-size IPv6 address + * (no "::" anywhere) with a scope ID. The buffer size is computed to + * hold eight groups of colon-separated four-hex-digit numbers, a + * percent sign, a scope id (at most 32 bits, in decimal), and NUL. 
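(Editorial aside, not part of the patch: the buffer budget described above works out to 8 x 4 = 32 hex digits plus 7 colons = 39 characters for the address itself, 1 + 10 = 11 for the percent sign and a 32-bit scope id printed in decimal, and 1 for the terminating NUL, i.e. 39 + 11 + 1 = 51 bytes, which is exactly what the NSM_ADDRBUF definition just below encodes as ((8 * 4 + 7) + (1 + 10) + 1).)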
+ */ +#define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1) + struct nsm_handle { struct list_head sm_link; atomic_t sm_count; - char * sm_name; + char *sm_mon_name; + char *sm_name; struct sockaddr_storage sm_addr; size_t sm_addrlen; unsigned int sm_monitored : 1, sm_sticky : 1; /* don't unmonitor */ - char sm_addrbuf[48]; /* address eyecatcher */ + struct nsm_private sm_priv; + char sm_addrbuf[NSM_ADDRBUF]; }; /* @@ -103,16 +112,6 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) return (struct sockaddr *)&host->h_srcaddr; } -static inline struct sockaddr_in *nsm_addr_in(const struct nsm_handle *handle) -{ - return (struct sockaddr_in *)&handle->sm_addr; -} - -static inline struct sockaddr *nsm_addr(const struct nsm_handle *handle) -{ - return (struct sockaddr *)&handle->sm_addr; -} - /* * Map an fl_owner_t into a unique 32-bit "pid" */ @@ -196,6 +195,7 @@ extern struct svc_procedure nlmsvc_procedures4[]; extern int nlmsvc_grace_period; extern unsigned long nlmsvc_timeout; extern int nsm_use_hostnames; +extern int nsm_local_state; /* * Lockd client functions @@ -220,7 +220,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, - const char *hostname); + const char *hostname, + int noresvport); struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len); @@ -229,10 +230,20 @@ void nlm_rebind_host(struct nlm_host *); struct nlm_host * nlm_get_host(struct nlm_host *); void nlm_release_host(struct nlm_host *); void nlm_shutdown_hosts(void); -extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, - unsigned int, u32); -void nsm_release(struct nsm_handle *); +void nlm_host_rebooted(const struct nlm_reboot *); + +/* + * Host monitoring + */ +int nsm_monitor(const struct nlm_host *host); +void nsm_unmonitor(const struct nlm_host *host); +struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, + const size_t salen, + const char *hostname, + const size_t hostname_len); +struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); +void nsm_release(struct nsm_handle *nsm); /* * This is used in garbage collection and resource reclaim @@ -280,16 +291,25 @@ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) static inline int __nlm_privileged_request4(const struct sockaddr *sap) { const struct sockaddr_in *sin = (struct sockaddr_in *)sap; - return (sin->sin_addr.s_addr == htonl(INADDR_LOOPBACK)) && - (ntohs(sin->sin_port) < 1024); + + if (ntohs(sin->sin_port) > 1023) + return 0; + + return ipv4_is_loopback(sin->sin_addr.s_addr); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int __nlm_privileged_request6(const struct sockaddr *sap) { const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; - return (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK) && - (ntohs(sin6->sin6_port) < 1024); + + if (ntohs(sin6->sin6_port) > 1023) + return 0; + + if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) + return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]); + + return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK; } #else /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ static inline int __nlm_privileged_request6(const struct sockaddr *sap) diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h deleted file mode 100644 index 5a5448bdb17..00000000000 --- a/include/linux/lockd/sm_inter.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * 
linux/include/linux/lockd/sm_inter.h - * - * Declarations for the kernel statd client. - * - * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> - */ - -#ifndef LINUX_LOCKD_SM_INTER_H -#define LINUX_LOCKD_SM_INTER_H - -#define SM_PROGRAM 100024 -#define SM_VERSION 1 -#define SM_STAT 1 -#define SM_MON 2 -#define SM_UNMON 3 -#define SM_UNMON_ALL 4 -#define SM_SIMU_CRASH 5 -#define SM_NOTIFY 6 - -#define SM_MAXSTRLEN 1024 -#define SM_PRIV_SIZE 16 - -/* - * Arguments for all calls to statd - */ -struct nsm_args { - __be32 addr; /* remote address */ - u32 prog; /* RPC callback info */ - u32 vers; - u32 proc; - - char * mon_name; -}; - -/* - * Result returned by statd - */ -struct nsm_res { - u32 status; - u32 state; -}; - -int nsm_monitor(struct nlm_host *); -int nsm_unmonitor(struct nlm_host *); -extern int nsm_local_state; - -#endif /* LINUX_LOCKD_SM_INTER_H */ diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index d6b3a802c04..7dc5b6cb44c 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -13,6 +13,13 @@ #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> +#define SM_MAXSTRLEN 1024 +#define SM_PRIV_SIZE 16 + +struct nsm_private { + unsigned char data[SM_PRIV_SIZE]; +}; + struct svc_rqst; #define NLM_MAXCOOKIELEN 32 @@ -77,10 +84,10 @@ struct nlm_res { * statd callback when client has rebooted */ struct nlm_reboot { - char * mon; - unsigned int len; - u32 state; - __be32 addr; + char *mon; + unsigned int len; + u32 state; + struct nsm_private priv; }; /* diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 29aec6e1002..23bf02fb124 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -73,6 +73,8 @@ struct lock_class_key { struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; }; +#define LOCKSTAT_POINTS 4 + /* * The lock-class itself: */ @@ -119,7 +121,8 @@ struct lock_class { int name_version; #ifdef CONFIG_LOCK_STAT - unsigned long contention_point[4]; + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; #endif }; @@ -144,6 +147,7 @@ enum bounce_type { struct lock_class_stats { unsigned long contention_point[4]; + unsigned long contending_point[4]; struct lock_time read_waittime; struct lock_time write_waittime; struct lock_time read_holdtime; @@ -165,6 +169,7 @@ struct lockdep_map { const char *name; #ifdef CONFIG_LOCK_STAT int cpu; + unsigned long ip; #endif }; @@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); -extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, - unsigned long ip); +extern void lock_set_class(struct lockdep_map *lock, const char *name, + struct lock_class_key *key, unsigned int subclass, + unsigned long ip); + +static inline void lock_set_subclass(struct lockdep_map *lock, + unsigned int subclass, unsigned long ip) +{ + lock_set_class(lock, lock->name, lock->key, subclass, ip); +} # define INIT_LOCKDEP .lockdep_recursion = 0, @@ -328,6 +340,7 @@ static inline void lockdep_on(void) # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) # define lock_release(l, n, i) do { } while (0) +# define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) @@ -356,7 +369,7 @@ struct lock_class_key { }; #ifdef CONFIG_LOCK_STAT extern void lock_contended(struct lockdep_map 
*lock, unsigned long ip); -extern void lock_acquired(struct lockdep_map *lock); +extern void lock_acquired(struct lockdep_map *lock, unsigned long ip); #define LOCK_CONTENDED(_lock, try, lock) \ do { \ @@ -364,20 +377,20 @@ do { \ lock_contended(&(_lock)->dep_map, _RET_IP_); \ lock(_lock); \ } \ - lock_acquired(&(_lock)->dep_map); \ + lock_acquired(&(_lock)->dep_map, _RET_IP_); \ } while (0) #else /* CONFIG_LOCK_STAT */ #define lock_contended(lockdep_map, ip) do {} while (0) -#define lock_acquired(lockdep_map) do {} while (0) +#define lock_acquired(lockdep_map, ip) do {} while (0) #define LOCK_CONTENDED(_lock, try, lock) \ lock(_lock) #endif /* CONFIG_LOCK_STAT */ -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) +#ifdef CONFIG_GENERIC_HARDIRQS extern void early_init_irq_lock_class(void); #else static inline void early_init_irq_lock_class(void) @@ -481,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr) # define lock_map_release(l) do { } while (0) #endif +#ifdef CONFIG_PROVE_LOCKING +# define might_lock(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +# define might_lock_read(lock) \ +do { \ + typecheck(struct lockdep_map *, &(lock)->dep_map); \ + lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ + lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ +} while (0) +#else +# define might_lock(lock) do { } while (0) +# define might_lock_read(lock) do { } while (0) +#endif + #endif /* __LINUX_LOCKDEP_H */ diff --git a/include/linux/magic.h b/include/linux/magic.h index f7f3fdddbef..439f6f3cb0c 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -13,6 +13,7 @@ #define EFS_SUPER_MAGIC 0x414A53 #define EXT2_SUPER_MAGIC 0xEF53 #define EXT3_SUPER_MAGIC 0xEF53 +#define XENFS_SUPER_MAGIC 0xabba1974 #define EXT4_SUPER_MAGIC 0xEF53 #define HPFS_SUPER_MAGIC 0xf995e849 #define ISOFS_SUPER_MAGIC 0x9660 diff --git a/include/linux/map_to_7segment.h b/include/linux/map_to_7segment.h index 7df8432c440..12d62a54d47 100644 --- a/include/linux/map_to_7segment.h +++ b/include/linux/map_to_7segment.h @@ -75,7 +75,7 @@ struct seg7_conversion_map { unsigned char table[128]; }; -static inline int map_to_seg7(struct seg7_conversion_map *map, int c) +static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c) { return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; } diff --git a/include/linux/marker.h b/include/linux/marker.h index 889196c7fbb..b85e74ca782 100644 --- a/include/linux/marker.h +++ b/include/linux/marker.h @@ -12,6 +12,7 @@ * See the file COPYING for more details. 
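Stepping back to the might_lock()/might_lock_read() annotations added to lockdep.h above: under CONFIG_PROVE_LOCKING they record an acquire and immediate release of the lock's dep_map even on paths that never actually take the lock, so lockdep learns the dependency every time the function runs. A minimal, hypothetical sketch (the cache structure and both paths are invented for illustration, not taken from this patch):

    struct my_cache {
            spinlock_t      lock;
            void            *cached;
    };

    static void *my_cache_get(struct my_cache *c, bool fast_path)
    {
            void *p;

            /* even the lock-free fast path declares that it may take c->lock */
            might_lock(&c->lock);

            if (fast_path)
                    return c->cached;

            spin_lock(&c->lock);
            p = c->cached;
            spin_unlock(&c->lock);
            return p;
    }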
*/ +#include <stdarg.h> #include <linux/types.h> struct module; @@ -48,10 +49,28 @@ struct marker { void (*call)(const struct marker *mdata, void *call_private, ...); struct marker_probe_closure single; struct marker_probe_closure *multi; + const char *tp_name; /* Optional tracepoint name */ + void *tp_cb; /* Optional tracepoint callback */ } __attribute__((aligned(8))); #ifdef CONFIG_MARKERS +#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \ + static const char __mstrtab_##name[] \ + __attribute__((section("__markers_strings"))) \ + = #name "\0" format; \ + static struct marker __mark_##name \ + __attribute__((section("__markers"), aligned(8))) = \ + { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \ + 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\ + NULL, tp_name_str, tp_cb } + +#define DEFINE_MARKER(name, format) \ + _DEFINE_MARKER(name, NULL, NULL, format) + +#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \ + _DEFINE_MARKER(name, #tp_name, tp_cb, format) + /* * Note : the empty asm volatile with read constraint is used here instead of a * "used" attribute to fix a gcc 4.1.x bug. @@ -65,14 +84,7 @@ struct marker { */ #define __trace_mark(generic, name, call_private, format, args...) \ do { \ - static const char __mstrtab_##name[] \ - __attribute__((section("__markers_strings"))) \ - = #name "\0" format; \ - static struct marker __mark_##name \ - __attribute__((section("__markers"), aligned(8))) = \ - { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \ - 0, 0, marker_probe_cb, \ - { __mark_empty_function, NULL}, NULL }; \ + DEFINE_MARKER(name, format); \ __mark_check_format(format, ## args); \ if (unlikely(__mark_##name.state)) { \ (*__mark_##name.call) \ @@ -80,14 +92,39 @@ struct marker { } \ } while (0) +#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ + do { \ + void __check_tp_type(void) \ + { \ + register_trace_##tp_name(tp_cb); \ + } \ + DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \ + __mark_check_format(format, ## args); \ + (*__mark_##name.call)(&__mark_##name, call_private, \ + ## args); \ + } while (0) + extern void marker_update_probe_range(struct marker *begin, struct marker *end); + +#define GET_MARKER(name) (__mark_##name) + #else /* !CONFIG_MARKERS */ +#define DEFINE_MARKER(name, tp_name, tp_cb, format) #define __trace_mark(generic, name, call_private, format, args...) \ __mark_check_format(format, ## args) +#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \ + do { \ + void __check_tp_type(void) \ + { \ + register_trace_##tp_name(tp_cb); \ + } \ + __mark_check_format(format, ## args); \ + } while (0) static inline void marker_update_probe_range(struct marker *begin, struct marker *end) { } +#define GET_MARKER(name) #endif /* CONFIG_MARKERS */ /** @@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin, __trace_mark(1, name, NULL, format, ## args) /** + * trace_mark_tp - Marker in a tracepoint callback + * @name: marker name, not quoted. + * @tp_name: tracepoint name, not quoted. + * @tp_cb: tracepoint callback. Should have an associated global symbol so it + * is not optimized away by the compiler (should not be static). + * @format: format string + * @args...: variable argument list + * + * Places a marker in a tracepoint callback. + */ +#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \ + __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args) + +/** * MARK_NOARGS - Format string for a marker with no argument. 
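As a quick orientation for the marker API being extended here (usage as described in Documentation/markers.txt; the event names and values below are invented): a marker with arguments passes a printf-style format string, while a marker with no arguments must pass the MARK_NOARGS constant documented just above rather than an empty string:

    static void my_isr_done(int irq, int handled)
    {
            /* marker with arguments: printf-style format string */
            trace_mark(my_subsys_irq_exit, "irq %d handled %d", irq, handled);

            /* marker without arguments: must use MARK_NOARGS, not "" */
            trace_mark(my_subsys_idle_enter, MARK_NOARGS);
    }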
*/ #define MARK_NOARGS " " @@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function; extern void marker_probe_cb(const struct marker *mdata, void *call_private, ...); -extern void marker_probe_cb_noarg(const struct marker *mdata, - void *call_private, ...); /* * Connect a probe to a marker. @@ -162,8 +211,10 @@ extern void *marker_get_private_data(const char *name, marker_probe_func *probe, /* * marker_synchronize_unregister must be called between the last marker probe - * unregistration and the end of module exit to make sure there is no caller - * executing a probe when it is freed. + * unregistration and the first one of + * - the end of module exit function + * - the free of any resource used by the probes + * to ensure the code and data are valid for any possibly running probes. */ #define marker_synchronize_unregister() synchronize_sched() diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h new file mode 100644 index 00000000000..e9d3fdfe41d --- /dev/null +++ b/include/linux/mdio-gpio.h @@ -0,0 +1,25 @@ +/* + * MDIO-GPIO bus platform data structures + * + * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __LINUX_MDIO_GPIO_H +#define __LINUX_MDIO_GPIO_H + +#include <linux/mdio-bitbang.h> + +struct mdio_gpio_platform_data { + /* GPIO numbers for bus pins */ + unsigned int mdc; + unsigned int mdio; + + unsigned int phy_mask; + int irqs[PHY_MAX_ADDR]; +}; + +#endif /* __LINUX_MDIO_GPIO_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1fbe14d3952..326f45c8653 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -19,22 +19,45 @@ #ifndef _LINUX_MEMCONTROL_H #define _LINUX_MEMCONTROL_H - +#include <linux/cgroup.h> struct mem_cgroup; struct page_cgroup; struct page; struct mm_struct; #ifdef CONFIG_CGROUP_MEM_RES_CTLR +/* + * All "charge" functions with gfp_mask should use GFP_KERNEL or + * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't + * alloc memory but reclaims memory from all available zones. So, "where I want + * memory from" bits of gfp_mask has no meaning. So any bits of that field is + * available but adding a rule is better. charge functions' gfp_mask should + * be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK for avoiding ambiguous + * codes. + * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.) 
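A minimal sketch of the calling convention the comment above asks for; the wrapper below is hypothetical, and GFP_RECLAIM_MASK is the reclaim-bit mask the comment itself refers to. Callers either pass GFP_KERNEL or strip the placement bits from an existing mask before charging:

    static int my_cache_charge(struct page *page, struct mm_struct *mm,
                               gfp_t gfp_mask)
    {
            /* only the reclaim-related bits of gfp_mask mean anything to memcg */
            return mem_cgroup_cache_charge(page, mm,
                                           gfp_mask & GFP_RECLAIM_MASK);
    }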
+ */ -extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, +extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +/* for swap handling */ +extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t mask, struct mem_cgroup **ptr); +extern void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *ptr); +extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr); + extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); -extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru); +extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru(struct page *page); +extern void mem_cgroup_move_lists(struct page *page, + enum lru_list from, enum lru_list to); extern void mem_cgroup_uncharge_page(struct page *page); extern void mem_cgroup_uncharge_cache_page(struct page *page); -extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); +extern int mem_cgroup_shrink_usage(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask); extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, @@ -47,12 +70,20 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); -#define mm_match_cgroup(mm, cgroup) \ - ((cgroup) == mem_cgroup_from_task((mm)->owner)) +static inline +int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) +{ + struct mem_cgroup *mem; + rcu_read_lock(); + mem = mem_cgroup_from_task((mm)->owner); + rcu_read_unlock(); + return cgroup == mem; +} extern int -mem_cgroup_prepare_migration(struct page *page, struct page *newpage); -extern void mem_cgroup_end_migration(struct page *page); +mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); +extern void mem_cgroup_end_migration(struct mem_cgroup *mem, + struct page *oldpage, struct page *newpage); /* * For memory reclaim. 
@@ -65,13 +96,32 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority); extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority); +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); +unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, + struct zone *zone, + enum lru_list lru); +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone); +struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page); -extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, - int priority, enum lru_list lru); +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern int do_swap_account; +#endif +static inline bool mem_cgroup_disabled(void) +{ + if (mem_cgroup_subsys.disabled) + return true; + return false; +} + +extern bool mem_cgroup_oom_called(struct task_struct *task); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ -static inline int mem_cgroup_charge(struct page *page, +struct mem_cgroup; + +static inline int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { return 0; @@ -83,6 +133,21 @@ static inline int mem_cgroup_cache_charge(struct page *page, return 0; } +static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm, + struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr) +{ + return 0; +} + +static inline void mem_cgroup_commit_charge_swapin(struct page *page, + struct mem_cgroup *ptr) +{ +} + +static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) +{ +} + static inline void mem_cgroup_uncharge_page(struct page *page) { } @@ -91,12 +156,33 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) { } -static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +static inline int mem_cgroup_shrink_usage(struct page *page, + struct mm_struct *mm, gfp_t gfp_mask) { return 0; } -static inline void mem_cgroup_move_lists(struct page *page, bool active) +static inline void mem_cgroup_add_lru_list(struct page *page, int lru) +{ +} + +static inline void mem_cgroup_del_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_del_lru(struct page *page) +{ + return ; +} + +static inline void +mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { } @@ -112,12 +198,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task, } static inline int -mem_cgroup_prepare_migration(struct page *page, struct page *newpage) +mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) { return 0; } -static inline void mem_cgroup_end_migration(struct page *page) +static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, + struct page *oldpage, + struct page *newpage) { } @@ -146,12 +234,42 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, { } -static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, - struct zone *zone, int priority, - enum lru_list lru) +static inline bool mem_cgroup_disabled(void) +{ + return true; +} + +static inline bool mem_cgroup_oom_called(struct task_struct *task) +{ + return false; +} + +static inline int +mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) +{ + return 1; +} + +static inline unsigned long +mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, + enum lru_list lru) { return 0; } + + +static 
inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) +{ + return NULL; +} + +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + return NULL; +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 36c82c9e6ea..3fdc10806d3 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -79,14 +79,14 @@ static inline int memory_notify(unsigned long val, void *v) #else extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); -extern int register_new_memory(struct mem_section *); +extern int register_new_memory(int, struct mem_section *); extern int unregister_memory_section(struct mem_section *); extern int memory_dev_init(void); extern int remove_memory_block(unsigned long, struct mem_section *, int); extern int memory_notify(unsigned long val, void *v); +extern struct memory_block *find_memory_block(struct mem_section *); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) - - +enum mem_add_context { BOOT, HOTPLUG }; #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 763ba81fc0f..d95f72e79b8 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -72,7 +72,7 @@ extern void __offline_isolated_pages(unsigned long, unsigned long); extern int offline_pages(unsigned long, unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ -extern int __add_pages(struct zone *zone, unsigned long start_pfn, +extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); extern int __remove_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h index cad314c1243..115dbe96508 100644 --- a/include/linux/mfd/da903x.h +++ b/include/linux/mfd/da903x.h @@ -32,6 +32,7 @@ enum { DA9030_ID_LDO18, DA9030_ID_LDO19, DA9030_ID_LDO_INT, /* LDO Internal */ + DA9030_ID_BAT, /* battery charger */ DA9034_ID_LED_1, DA9034_ID_LED_2, @@ -93,6 +94,43 @@ struct da9034_touch_pdata { int y_inverted; }; +/* DA9030 battery charger data */ +struct power_supply_info; + +struct da9030_battery_info { + /* battery parameters */ + struct power_supply_info *battery_info; + + /* current and voltage to use for battery charging */ + unsigned int charge_milliamp; + unsigned int charge_millivolt; + + /* voltage thresholds (in millivolts) */ + int vbat_low; + int vbat_crit; + int vbat_charge_start; + int vbat_charge_stop; + int vbat_charge_restart; + + /* battery nominal minimal and maximal voltages in millivolts */ + int vcharge_min; + int vcharge_max; + + /* Temperature thresholds. 
These are DA9030 register values + "as is" and should be measured for each battery type */ + int tbat_low; + int tbat_high; + int tbat_restart; + + + /* battery monitor interval (seconds) */ + unsigned int batmon_interval; + + /* platform callbacks for battery low and critical events */ + void (*battery_low)(void); + void (*battery_critical)(void); +}; + struct da903x_subdev_info { int id; const char *name; @@ -190,11 +228,13 @@ extern int da903x_unregister_notifier(struct device *dev, extern int da903x_query_status(struct device *dev, unsigned int status); -/* NOTE: the two functions below are not intended for use outside - * of the DA9034 sub-device drivers +/* NOTE: the functions below are not intended for use outside + * of the DA903x sub-device drivers */ extern int da903x_write(struct device *dev, int reg, uint8_t val); +extern int da903x_writes(struct device *dev, int reg, int len, uint8_t *val); extern int da903x_read(struct device *dev, int reg, uint8_t *val); +extern int da903x_reads(struct device *dev, int reg, int len, uint8_t *val); extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask); extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask); extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask); diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h index 217bb22ebb8..af95a1d2f3a 100644 --- a/include/linux/mfd/wm8350/audio.h +++ b/include/linux/mfd/wm8350/audio.h @@ -1,7 +1,7 @@ /* * audio.h -- Audio Driver for Wolfson WM8350 PMIC * - * Copyright 2007 Wolfson Microelectronics PLC + * Copyright 2007, 2008 Wolfson Microelectronics PLC * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -70,9 +70,9 @@ #define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */ #define WM8350_VMID_OFF 0 -#define WM8350_VMID_500K 1 -#define WM8350_VMID_100K 2 -#define WM8350_VMID_10K 3 +#define WM8350_VMID_300K 1 +#define WM8350_VMID_50K 2 +#define WM8350_VMID_5K 3 /* * R40 (0x28) - Clock Control 1 @@ -591,8 +591,38 @@ #define WM8350_IRQ_CODEC_MICSCD 41 #define WM8350_IRQ_CODEC_MICD 42 +/* + * WM8350 Platform data. + * + * This must be initialised per platform for best audio performance. + * Please see WM8350 datasheet for information. 
+ */ +struct wm8350_audio_platform_data { + int vmid_discharge_msecs; /* VMID --> OFF discharge time */ + int drain_msecs; /* OFF drain time */ + int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */ + int vmid_charge_msecs; /* vmid power up time */ + u32 vmid_s_curve:2; /* vmid enable s curve speed */ + u32 dis_out4:2; /* out4 discharge speed */ + u32 dis_out3:2; /* out3 discharge speed */ + u32 dis_out2:2; /* out2 discharge speed */ + u32 dis_out1:2; /* out1 discharge speed */ + u32 vroi_out4:1; /* out4 tie off */ + u32 vroi_out3:1; /* out3 tie off */ + u32 vroi_out2:1; /* out2 tie off */ + u32 vroi_out1:1; /* out1 tie off */ + u32 vroi_enable:1; /* enable tie off */ + u32 codec_current_on:2; /* current level ON */ + u32 codec_current_standby:2; /* current level STANDBY */ + u32 codec_current_charge:2; /* codec current @ vmid charge */ +}; + +struct snd_soc_codec; + struct wm8350_codec { struct platform_device *pdev; + struct snd_soc_codec *codec; + struct wm8350_audio_platform_data *platform_data; }; #endif diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h index 05378864945..54bc5d0fd50 100644 --- a/include/linux/mfd/wm8350/comparator.h +++ b/include/linux/mfd/wm8350/comparator.h @@ -164,4 +164,12 @@ #define WM8350_AUXADC_BATT 6 #define WM8350_AUXADC_TEMP 7 +struct wm8350; + +/* + * AUX ADC Readback + */ +int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, + int vref); + #endif diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 6ebf97f2a47..980669d50dc 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -29,6 +29,7 @@ */ #define WM8350_RESET_ID 0x00 #define WM8350_ID 0x01 +#define WM8350_REVISION 0x02 #define WM8350_SYSTEM_CONTROL_1 0x03 #define WM8350_SYSTEM_CONTROL_2 0x04 #define WM8350_SYSTEM_HIBERNATE 0x05 @@ -57,6 +58,10 @@ #define WM8350_OVER_CURRENT_INT_STATUS_MASK 0x25 #define WM8350_GPIO_INT_STATUS_MASK 0x26 #define WM8350_COMPARATOR_INT_STATUS_MASK 0x27 +#define WM8350_CHARGER_OVERRIDES 0xE2 +#define WM8350_MISC_OVERRIDES 0xE3 +#define WM8350_COMPARATOR_OVERRIDES 0xE7 +#define WM8350_STATE_MACHINE_STATUS 0xE9 #define WM8350_MAX_REGISTER 0xFF @@ -77,6 +82,11 @@ #define WM8350_CUST_ID_MASK 0x00FF /* + * R2 (0x02) - Revision + */ +#define WM8350_MASK_REV_MASK 0x00FF + +/* * R3 (0x03) - System Control 1 */ #define WM8350_CHIP_ON 0x8000 @@ -523,6 +533,35 @@ #define WM8350_DC2_STS 0x0002 #define WM8350_DC1_STS 0x0001 +/* + * R226 (0xE2) - Charger status + */ +#define WM8350_CHG_BATT_HOT_OVRDE 0x8000 +#define WM8350_CHG_BATT_COLD_OVRDE 0x4000 + +/* + * R227 (0xE3) - Misc Overrides + */ +#define WM8350_USB_LIMIT_OVRDE 0x0400 + +/* + * R227 (0xE7) - Comparator Overrides + */ +#define WM8350_USB_FB_OVRDE 0x8000 +#define WM8350_WALL_FB_OVRDE 0x4000 +#define WM8350_BATT_FB_OVRDE 0x2000 + + +/* + * R233 (0xE9) - State Machinine Status + */ +#define WM8350_USB_SM_MASK 0x0700 +#define WM8350_USB_SM_SHIFT 8 + +#define WM8350_USB_SM_100_SLV 1 +#define WM8350_USB_SM_500_SLV 5 +#define WM8350_USB_SM_STDBY_SLV 7 + /* WM8350 wake up conditions */ #define WM8350_IRQ_WKUP_OFF_STATE 43 #define WM8350_IRQ_WKUP_HIB_STATE 44 @@ -536,6 +575,7 @@ #define WM8350_REV_E 0x4 #define WM8350_REV_F 0x5 #define WM8350_REV_G 0x6 +#define WM8350_REV_H 0x7 #define WM8350_NUM_IRQ 63 @@ -549,6 +589,14 @@ extern const u16 wm8350_mode0_defaults[]; extern const u16 wm8350_mode1_defaults[]; extern const u16 wm8350_mode2_defaults[]; extern const u16 wm8350_mode3_defaults[]; +extern const 
u16 wm8351_mode0_defaults[]; +extern const u16 wm8351_mode1_defaults[]; +extern const u16 wm8351_mode2_defaults[]; +extern const u16 wm8351_mode3_defaults[]; +extern const u16 wm8352_mode0_defaults[]; +extern const u16 wm8352_mode1_defaults[]; +extern const u16 wm8352_mode2_defaults[]; +extern const u16 wm8352_mode3_defaults[]; struct wm8350; @@ -558,8 +606,6 @@ struct wm8350_irq { }; struct wm8350 { - int rev; /* chip revision */ - struct device *dev; /* device IO */ @@ -572,6 +618,8 @@ struct wm8350 { void *src); u16 *reg_cache; + struct mutex auxadc_mutex; + /* Interrupt handling */ struct work_struct irq_work; struct mutex irq_mutex; /* IRQ table mutex */ diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index 69b69e07f62..96acbfc8aa1 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h @@ -701,6 +701,10 @@ struct platform_device; struct regulator_init_data; struct wm8350_pmic { + /* Number of regulators of each type on this device */ + int max_dcdc; + int max_isink; + /* ISINK to DCDC mapping */ int isink_A_dcdc; int isink_B_dcdc; diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h index 1c8f3cde79b..2b9479310bb 100644 --- a/include/linux/mfd/wm8350/supply.h +++ b/include/linux/mfd/wm8350/supply.h @@ -13,7 +13,8 @@ #ifndef __LINUX_MFD_WM8350_SUPPLY_H_ #define __LINUX_MFD_WM8350_SUPPLY_H_ -#include <linux/platform_device.h> +#include <linux/mutex.h> +#include <linux/power_supply.h> /* * Charger registers @@ -104,8 +105,30 @@ #define WM8350_IRQ_EXT_WALL_FB 37 #define WM8350_IRQ_EXT_BAT_FB 38 +/* + * Policy to control charger state machine. + */ +struct wm8350_charger_policy { + + /* charger state machine policy - set in machine driver */ + int eoc_mA; /* end of charge current (mA) */ + int charge_mV; /* charge voltage */ + int fast_limit_mA; /* fast charge current limit */ + int fast_limit_USB_mA; /* USB fast charge current limit */ + int charge_timeout; /* charge timeout (mins) */ + int trickle_start_mV; /* trickle charge starts at mV */ + int trickle_charge_mA; /* trickle charge current */ + int trickle_charge_USB_mA; /* USB trickle charge current */ +}; + struct wm8350_power { struct platform_device *pdev; + struct power_supply battery; + struct power_supply usb; + struct power_supply ac; + struct wm8350_charger_policy *policy; + + int rev_g_coeff; }; #endif diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3f34005068d..527602cdea1 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -7,6 +7,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **); #ifdef CONFIG_MIGRATION +#define PAGE_MIGRATION 1 + extern int putback_lru_pages(struct list_head *l); extern int migrate_page(struct address_space *, struct page *, struct page *); @@ -20,6 +22,8 @@ extern int migrate_vmas(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, unsigned long flags); #else +#define PAGE_MIGRATION 0 + static inline int putback_lru_pages(struct list_head *l) { return 0; } static inline int migrate_pages(struct list_head *l, new_page_t x, unsigned long private) { return -ENOSYS; } diff --git a/include/linux/mii.h b/include/linux/mii.h index 151b7e0182c..ad748588faf 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -135,6 +135,10 @@ #define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ #define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */ +/* Flow control flags */ +#define FLOW_CTRL_TX 0x01 +#define 
FLOW_CTRL_RX 0x02 + /* This structure is used in all SIOCxMIIxxx ioctl calls */ struct mii_ioctl_data { __u16 phy_id; @@ -235,5 +239,34 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock, return 0; } +/** + * mii_resolve_flowctrl_fdx + * @lcladv: value of MII ADVERTISE register + * @rmtadv: value of MII LPA register + * + * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3 + */ +static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & ADVERTISE_PAUSE_CAP) { + if (lcladv & ADVERTISE_PAUSE_ASYM) { + if (rmtadv & LPA_PAUSE_CAP) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + else if (rmtadv & LPA_PAUSE_ASYM) + cap = FLOW_CTRL_RX; + } else { + if (rmtadv & LPA_PAUSE_CAP) + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } + } else if (lcladv & ADVERTISE_PAUSE_ASYM) { + if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM)) + cap = FLOW_CTRL_TX; + } + + return cap; +} + #endif /* __KERNEL__ */ #endif /* __LINUX_MII_H__ */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 26433ec520b..a820f816a49 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -3,33 +3,33 @@ #include <linux/module.h> #include <linux/major.h> -#define PSMOUSE_MINOR 1 -#define MS_BUSMOUSE_MINOR 2 -#define ATIXL_BUSMOUSE_MINOR 3 -/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ -#define ATARIMOUSE_MINOR 5 -#define SUN_MOUSE_MINOR 6 -#define APOLLO_MOUSE_MINOR 7 -#define PC110PAD_MINOR 9 -/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ +#define PSMOUSE_MINOR 1 +#define MS_BUSMOUSE_MINOR 2 +#define ATIXL_BUSMOUSE_MINOR 3 +/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ +#define ATARIMOUSE_MINOR 5 +#define SUN_MOUSE_MINOR 6 +#define APOLLO_MOUSE_MINOR 7 +#define PC110PAD_MINOR 9 +/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ #define WATCHDOG_MINOR 130 /* Watchdog timer */ #define TEMP_MINOR 131 /* Temperature Sensor */ -#define RTC_MINOR 135 +#define RTC_MINOR 135 #define EFI_RTC_MINOR 136 /* EFI Time services */ -#define SUN_OPENPROM_MINOR 139 +#define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* DMAPI */ -#define NVRAM_MINOR 144 -#define SGI_MMTIMER 153 +#define NVRAM_MINOR 144 +#define SGI_MMTIMER 153 #define STORE_QUEUE_MINOR 155 -#define I2O_MINOR 166 +#define I2O_MINOR 166 #define MICROCODE_MINOR 184 -#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ -#define MPT_MINOR 220 -#define MISC_DYNAMIC_MINOR 255 - -#define TUN_MINOR 200 -#define HPET_MINOR 228 -#define KVM_MINOR 232 +#define TUN_MINOR 200 +#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ +#define MPT_MINOR 220 +#define HPET_MINOR 228 +#define FUSE_MINOR 229 +#define KVM_MINOR 232 +#define MISC_DYNAMIC_MINOR 255 struct device; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 371086fd946..8f659cc2996 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -206,6 +206,7 @@ struct mlx4_caps { int reserved_cqs; int num_eqs; int reserved_eqs; + int num_comp_vectors; int num_mpts; int num_mtt_segs; int fmr_reserved_mtts; @@ -328,6 +329,7 @@ struct mlx4_cq { int arm_sn; int cqn; + unsigned vector; atomic_t refcount; struct completion free; @@ -437,7 +439,7 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - int collapsed); + unsigned vector, int collapsed); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, 
int cnt, int align, int *base); diff --git a/include/linux/mm.h b/include/linux/mm.h index ffee2f74341..4a3d28c8644 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -145,6 +145,23 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ +/* + * This interface is used by x86 PAT code to identify a pfn mapping that is + * linear over entire vma. This is to optimize PAT code that deals with + * marking the physical region with a particular prot. This is not for generic + * mm use. Note also that this check will not work if the pfn mapping is + * linear for a vma starting at physical address 0. In which case PAT code + * falls back to slow path of reserving physical range page by page. + */ +static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) +{ + return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff); +} + +static inline int is_pfn_mapping(struct vm_area_struct *vma) +{ + return (vma->vm_flags & VM_PFNMAP); +} /* * vm_fault is filled by the the pagefault handler and passed to the vma's @@ -700,6 +717,11 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS) +/* + * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. + */ +extern void pagefault_out_of_memory(void); + #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) extern void show_free_areas(void); @@ -781,6 +803,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_phys(struct vm_area_struct *vma, unsigned long address, + unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); @@ -1286,5 +1310,7 @@ int vmemmap_populate_basepages(struct page *start_page, int vmemmap_populate(struct page *start_page, unsigned long pages, int node); void vmemmap_populate_print_last(void); +extern void *alloc_locked_buffer(size_t size); +extern void free_locked_buffer(void *buffer, size_t size); #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c948350c378..7fbb9726755 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_add(&page->lru, &zone->lru[l].list); __inc_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_add_lru_list(page, l); } static inline void @@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_del(&page->lru); __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } static inline void @@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page) l += page_is_file_cache(page); } __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } /** @@ -78,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page) return lru; } -/** - * inactive_anon_is_low - check if anonymous pages need to be deactivated - * @zone: zone to check - * - * Returns true if the zone does not have enough inactive anon pages, - * meaning some active anon pages need to be deactivated. 
- */ -static inline int inactive_anon_is_low(struct zone *zone) -{ - unsigned long active, inactive; - - active = zone_page_state(zone, NR_ACTIVE_ANON); - inactive = zone_page_state(zone, NR_INACTIVE_ANON); - - if (inactive * zone->inactive_ratio < active) - return 1; - - return 0; -} #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fe825471d5a..9cfc9b627fd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -232,8 +232,9 @@ struct mm_struct { struct core_state *core_state; /* coredumping support */ /* aio bits */ - rwlock_t ioctx_list_lock; /* aio lock */ - struct kioctx *ioctx_list; + spinlock_t ioctx_lock; + struct hlist_head ioctx_list; + #ifdef CONFIG_MM_OWNER /* * "owner" points to a task that is regarded as the canonical diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 143cebf0586..7ac8b500d55 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -151,4 +151,6 @@ static inline void mmc_claim_host(struct mmc_host *host) __mmc_claim_host(host, NULL); } +extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); + #endif diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f842f234e44..4e457256bd3 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -41,6 +41,7 @@ struct mmc_ios { #define MMC_BUS_WIDTH_1 0 #define MMC_BUS_WIDTH_4 2 +#define MMC_BUS_WIDTH_8 3 unsigned char timing; /* timing specification used */ @@ -116,6 +117,7 @@ struct mmc_host { #define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */ #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ +#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ /* host specific block data */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 35a7b5e1946..09c14e213b6 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -263,6 +263,19 @@ enum zone_type { #error ZONES_SHIFT -- too many zones configured adjust calculation #endif +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are refeferenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. + * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; +}; + struct zone { /* Fields commonly accessed by the page allocator */ unsigned long pages_min, pages_low, pages_high; @@ -315,16 +328,7 @@ struct zone { unsigned long nr_scan; } lru[NR_LRU_LISTS]; - /* - * The pageout code in vmscan.c keeps track of how many of the - * mem/swap backed and file backed pages are refeferenced. - * The higher the rotated/scanned ratio, the more valuable - * that cache is. - * - * The anon LRU stats live in [0], file LRU stats in [1] - */ - unsigned long recent_rotated[2]; - unsigned long recent_scanned[2]; + struct zone_reclaim_stat reclaim_stat; unsigned long pages_scanned; /* since last reclaim */ unsigned long flags; /* zone flags, see below */ diff --git a/include/linux/module.h b/include/linux/module.h index 3bfed013350..4f7ea12463d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -294,9 +294,6 @@ struct module /* The size of the executable code in each section. */ unsigned int init_text_size, core_text_size; - /* The handle returned from unwind_add_table. 
*/ - void *unwind_info; - /* Arch-specific module values */ struct mod_arch_specific arch; @@ -368,6 +365,18 @@ struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); int is_module_address(unsigned long addr); +static inline int within_module_core(unsigned long addr, struct module *mod) +{ + return (unsigned long)mod->module_core <= addr && + addr < (unsigned long)mod->module_core + mod->core_size; +} + +static inline int within_module_init(unsigned long addr, struct module *mod) +{ + return (unsigned long)mod->module_init <= addr && + addr < (unsigned long)mod->module_init + mod->init_size; +} + /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index eb103395748..c1f40c2f7ff 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -13,6 +13,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, char *secstrings, struct module *mod); +/* Additional bytes needed by arch in front of individual sections */ +unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section); + /* Allocator used for allocating struct module, core sections and init sections. Returns NULL on failure. */ void *module_alloc(unsigned long size); diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 6f4c180179e..5375faca1f7 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -117,6 +117,7 @@ struct sioc_mif_req6 #include <linux/pim.h> #include <linux/skbuff.h> /* for struct sk_buff_head */ +#include <net/net_namespace.h> #ifdef CONFIG_IPV6_MROUTE static inline int ip6_mroute_opt(int opt) @@ -187,6 +188,9 @@ struct mif_device struct mfc6_cache { struct mfc6_cache *next; /* Next entry on cache line */ +#ifdef CONFIG_NET_NS + struct net *mfc6_net; +#endif struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ struct in6_addr mf6c_origin; /* Source of packet */ mifi_t mf6c_parent; /* Source interface */ @@ -209,6 +213,18 @@ struct mfc6_cache } mfc_un; }; +static inline +struct net *mfc6_net(const struct mfc6_cache *mfc) +{ + return read_pnet(&mfc->mfc6_net); +} + +static inline +void mfc6_net_set(struct mfc6_cache *mfc, struct net *net) +{ + write_pnet(&mfc->mfc6_net, hold_net(net)); +} + #define MFC_STATIC 1 #define MFC_NOTIFY 2 @@ -229,13 +245,17 @@ struct mfc6_cache #ifdef __KERNEL__ struct rtmsg; -extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); +extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, + struct rtmsg *rtm, int nowait); #ifdef CONFIG_IPV6_MROUTE -extern struct sock *mroute6_socket; +static inline struct sock *mroute6_socket(struct net *net) +{ + return net->ipv6.mroute6_sk; +} extern int ip6mr_sk_done(struct sock *sk); #else -#define mroute6_socket NULL +static inline struct sock *mroute6_socket(struct net *net) { return NULL; } static inline int ip6mr_sk_done(struct sock *sk) { return 0; } #endif #endif diff --git a/include/linux/msi.h b/include/linux/msi.h index 8f293922720..d2b8a1e8ca1 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -10,8 +10,11 @@ struct msi_msg { }; /* Helper functions */ +struct irq_desc; extern void mask_msi_irq(unsigned int irq); extern void unmask_msi_irq(unsigned int irq); +extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); +extern void write_msi_msg_desc(struct irq_desc *desc, 
struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index c02f3d264ec..e80c674daeb 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h @@ -13,7 +13,7 @@ struct mtd_info *mtd_concat_create( struct mtd_info *subdev[], /* subdevices to concatenate */ int num_devs, /* number of subdevices */ - char *name); /* name for the new device */ + const char *name); /* name for the new device */ void mtd_concat_destroy(struct mtd_info *mtd); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index eae26bb6430..64433eb411d 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -83,7 +83,7 @@ typedef enum { * @datbuf: data buffer - if NULL only oob data are read/written * @oobbuf: oob data buffer * - * Note, it is allowed to read more then one OOB area at one go, but not write. + * Note, it is allowed to read more than one OOB area at one go, but not write. * The interface assumes that the OOB write requests program only one page's * OOB area. */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index bc6da10ceee..7a0e5c4f807 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); /* * NOTE: mutex_trylock() follows the spin_trylock() convention, * not the down_trylock() convention! + * + * Returns 1 if the mutex has been acquired successfully, and 0 on contention. */ extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); diff --git a/include/linux/namei.h b/include/linux/namei.h index 99eb80306dc..fc2e0357987 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -94,4 +94,9 @@ static inline char *nd_get_link(struct nameidata *nd) return nd->saved_names[nd->depth]; } +static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) +{ + ((char *) name)[min(len, maxlen)] = '\0'; +} + #endif /* _LINUX_NAMEI_H */ diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index 9f2d76347f1..f69e66d151c 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -87,7 +87,7 @@ struct ncp_objectname_ioctl #define NCP_AUTH_NDS 0x32 int auth_type; size_t object_name_len; - void __user * object_name; /* an userspace data, in most cases user name */ + void __user * object_name; /* a userspace data, in most cases user name */ }; struct ncp_privatedata_ioctl diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e26f5495289..114091be887 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -43,6 +43,9 @@ #include <net/net_namespace.h> #include <net/dsa.h> +#ifdef CONFIG_DCB +#include <net/dcbnl.h> +#endif struct vlan_group; struct ethtool_ops; @@ -310,9 +313,11 @@ struct napi_struct { #ifdef CONFIG_NETPOLL spinlock_t poll_lock; int poll_owner; +#endif struct net_device *dev; struct list_head dev_list; -#endif + struct sk_buff *gro_list; + struct sk_buff *skb; }; enum @@ -373,22 +378,8 @@ static inline int napi_reschedule(struct napi_struct *napi) * * Mark NAPI processing as complete. 
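For context on the napi_complete() interface documented just above (which this patch converts from a static inline into an out-of-line function provided by the core), the caller-side pattern is unchanged; a hypothetical driver poll routine, with the RX-clean and IRQ-enable helpers invented purely for illustration:

    static int mydrv_poll(struct napi_struct *napi, int budget)
    {
            int work_done = mydrv_clean_rx(napi, budget);   /* hypothetical */

            if (work_done < budget) {
                    /* all pending work handled: leave polled mode ... */
                    napi_complete(napi);
                    /* ... and let the device interrupt us again */
                    mydrv_enable_rx_irq(napi);              /* hypothetical */
            }
            return work_done;
    }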
*/ -static inline void __napi_complete(struct napi_struct *n) -{ - BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - list_del(&n->poll_list); - smp_mb__before_clear_bit(); - clear_bit(NAPI_STATE_SCHED, &n->state); -} - -static inline void napi_complete(struct napi_struct *n) -{ - unsigned long flags; - - local_irq_save(flags); - __napi_complete(n); - local_irq_restore(flags); -} +extern void __napi_complete(struct napi_struct *n); +extern void napi_complete(struct napi_struct *n); /** * napi_disable - prevent NAPI from scheduling @@ -452,6 +443,147 @@ struct netdev_queue { struct Qdisc *qdisc_sleeping; } ____cacheline_aligned_in_smp; + +/* + * This structure defines the management hooks for network devices. + * The following hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. + * + * int (*ndo_init)(struct net_device *dev); + * This function is called once when network device is registered. + * The network device can use this to any late stage initializaton + * or semantic validattion. It can fail with an error code which will + * be propogated back to register_netdev + * + * void (*ndo_uninit)(struct net_device *dev); + * This function is called when device is unregistered or when registration + * fails. It is not called if init fails. + * + * int (*ndo_open)(struct net_device *dev); + * This function is called when network device transistions to the up + * state. + * + * int (*ndo_stop)(struct net_device *dev); + * This function is called when network device transistions to the down + * state. + * + * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev); + * Called when a packet needs to be transmitted. + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED, + * Required can not be NULL. + * + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); + * Called to decide which queue to when device supports multiple + * transmit queues. + * + * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); + * This function is called to allow device receiver to make + * changes to configuration when multicast or promiscious is enabled. + * + * void (*ndo_set_rx_mode)(struct net_device *dev); + * This function is called device changes address list filtering. + * + * void (*ndo_set_multicast_list)(struct net_device *dev); + * This function is called when the multicast address list changes. + * + * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); + * This function is called when the Media Access Control address + * needs to be changed. If not this interface is not defined, the + * mac address can not be changed. + * + * int (*ndo_validate_addr)(struct net_device *dev); + * Test if Media Access Control address is valid for the device. + * + * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called when a user request an ioctl which can't be handled by + * the generic interface code. If not defined ioctl's return + * not supported error code. + * + * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); + * Used to set network devices bus interface parameters. This interface + * is retained for legacy reason, new devices should use the bus + * interface (PCI) for low level management. + * + * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); + * Called when a user wants to change the Maximum Transfer Unit + * of a device. If not defined, any request to change MTU will + * will return an error. 
+ * + * void (*ndo_tx_timeout)(struct net_device *dev); + * Callback uses when the transmitter has not made any progress + * for dev->watchdog ticks. + * + * struct net_device_stats* (*get_stats)(struct net_device *dev); + * Called when a user wants to get the network device usage + * statistics. If not defined, the counters in dev->stats will + * be used. + * + * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); + * If device support VLAN receive accleration + * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called + * when vlan groups for the device changes. Note: grp is NULL + * if no vlan's groups are being used. + * + * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); + * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * this function is called when a VLAN id is registered. + * + * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) + * this function is called when a VLAN id is unregistered. + * + * void (*ndo_poll_controller)(struct net_device *dev); + */ +#define HAVE_NET_DEVICE_OPS +struct net_device_ops { + int (*ndo_init)(struct net_device *dev); + void (*ndo_uninit)(struct net_device *dev); + int (*ndo_open)(struct net_device *dev); + int (*ndo_stop)(struct net_device *dev); + int (*ndo_start_xmit) (struct sk_buff *skb, + struct net_device *dev); + u16 (*ndo_select_queue)(struct net_device *dev, + struct sk_buff *skb); +#define HAVE_CHANGE_RX_FLAGS + void (*ndo_change_rx_flags)(struct net_device *dev, + int flags); +#define HAVE_SET_RX_MODE + void (*ndo_set_rx_mode)(struct net_device *dev); +#define HAVE_MULTICAST + void (*ndo_set_multicast_list)(struct net_device *dev); +#define HAVE_SET_MAC_ADDR + int (*ndo_set_mac_address)(struct net_device *dev, + void *addr); +#define HAVE_VALIDATE_ADDR + int (*ndo_validate_addr)(struct net_device *dev); +#define HAVE_PRIVATE_IOCTL + int (*ndo_do_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); +#define HAVE_SET_CONFIG + int (*ndo_set_config)(struct net_device *dev, + struct ifmap *map); +#define HAVE_CHANGE_MTU + int (*ndo_change_mtu)(struct net_device *dev, + int new_mtu); + int (*ndo_neigh_setup)(struct net_device *dev, + struct neigh_parms *); +#define HAVE_TX_TIMEOUT + void (*ndo_tx_timeout) (struct net_device *dev); + + struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); + + void (*ndo_vlan_rx_register)(struct net_device *dev, + struct vlan_group *grp); + void (*ndo_vlan_rx_add_vid)(struct net_device *dev, + unsigned short vid); + void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, + unsigned short vid); +#ifdef CONFIG_NET_POLL_CONTROLLER +#define HAVE_NETDEV_POLL + void (*ndo_poll_controller)(struct net_device *dev); +#endif +}; + /* * The DEVICE structure. * Actually, this whole structure is a big mistake. It mixes I/O @@ -496,14 +628,7 @@ struct net_device unsigned long state; struct list_head dev_list; -#ifdef CONFIG_NETPOLL struct list_head napi_list; -#endif - - /* The device initialization function. Called only once. */ - int (*init)(struct net_device *dev); - - /* ------- Fields preinitialized in Space.c finish here ------- */ /* Net device features */ unsigned long features; @@ -522,6 +647,7 @@ struct net_device #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. 
Please */ /* do not use LLTX in new drivers */ #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ +#define NETIF_F_GRO 16384 /* Generic receive offload */ #define NETIF_F_LRO 32768 /* large receive offload */ /* Segmentation offload features */ @@ -547,15 +673,13 @@ struct net_device * for all in netdev_increment_features. */ #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ - NETIF_F_SG | NETIF_F_HIGHDMA | \ + NETIF_F_SG | NETIF_F_HIGHDMA | \ NETIF_F_FRAGLIST) /* Interface index. Unique device identifier */ int ifindex; int iflink; - - struct net_device_stats* (*get_stats)(struct net_device *dev); struct net_device_stats stats; #ifdef CONFIG_WIRELESS_EXT @@ -565,18 +689,13 @@ struct net_device /* Instance data managed by the core of Wireless Extensions. */ struct iw_public_data * wireless_data; #endif + /* Management operations */ + const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; /* Hardware header description */ const struct header_ops *header_ops; - /* - * This marks the end of the "visible" part of the structure. All - * fields hereafter are internal to the system, and may change at - * will (read: may be cleaned up at will). - */ - - unsigned int flags; /* interface flags (a la BSD) */ unsigned short gflags; unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ @@ -635,7 +754,7 @@ struct net_device unsigned long last_rx; /* Time of last Rx */ /* Interface address info used in eth_type_trans() */ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast - because most packets are unicast) */ + because most packets are unicast) */ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ @@ -654,18 +773,12 @@ struct net_device /* * One part is mostly used on xmit path (device) */ - void *priv; /* pointer to private data */ - int (*hard_start_xmit) (struct sk_buff *skb, - struct net_device *dev); /* These may be needed for future network-power-down code. */ unsigned long trans_start; /* Time (in jiffies) of last Tx */ int watchdog_timeo; /* used by dev_watchdog() */ struct timer_list watchdog_timer; -/* - * refcnt is a very hot point, so align it on SMP - */ /* Number of references to this device */ atomic_t refcnt ____cacheline_aligned_in_smp; @@ -684,56 +797,12 @@ struct net_device NETREG_RELEASED, /* called free_netdev */ } reg_state; - /* Called after device is detached from network. */ - void (*uninit)(struct net_device *dev); - /* Called after last user reference disappears. */ - void (*destructor)(struct net_device *dev); - - /* Pointers to interface service routines. 
*/ - int (*open)(struct net_device *dev); - int (*stop)(struct net_device *dev); -#define HAVE_NETDEV_POLL -#define HAVE_CHANGE_RX_FLAGS - void (*change_rx_flags)(struct net_device *dev, - int flags); -#define HAVE_SET_RX_MODE - void (*set_rx_mode)(struct net_device *dev); -#define HAVE_MULTICAST - void (*set_multicast_list)(struct net_device *dev); -#define HAVE_SET_MAC_ADDR - int (*set_mac_address)(struct net_device *dev, - void *addr); -#define HAVE_VALIDATE_ADDR - int (*validate_addr)(struct net_device *dev); -#define HAVE_PRIVATE_IOCTL - int (*do_ioctl)(struct net_device *dev, - struct ifreq *ifr, int cmd); -#define HAVE_SET_CONFIG - int (*set_config)(struct net_device *dev, - struct ifmap *map); -#define HAVE_CHANGE_MTU - int (*change_mtu)(struct net_device *dev, int new_mtu); - -#define HAVE_TX_TIMEOUT - void (*tx_timeout) (struct net_device *dev); + /* Called from unregister, can be used to call free_netdev */ + void (*destructor)(struct net_device *dev); - void (*vlan_rx_register)(struct net_device *dev, - struct vlan_group *grp); - void (*vlan_rx_add_vid)(struct net_device *dev, - unsigned short vid); - void (*vlan_rx_kill_vid)(struct net_device *dev, - unsigned short vid); - - int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); #ifdef CONFIG_NETPOLL struct netpoll_info *npinfo; #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - void (*poll_controller)(struct net_device *dev); -#endif - - u16 (*select_queue)(struct net_device *dev, - struct sk_buff *skb); #ifdef CONFIG_NET_NS /* Network namespace this network device is inside */ @@ -764,6 +833,49 @@ struct net_device /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; + +#ifdef CONFIG_DCB + /* Data Center Bridging netlink ops */ + struct dcbnl_rtnl_ops *dcbnl_ops; +#endif + +#ifdef CONFIG_COMPAT_NET_DEV_OPS + struct { + int (*init)(struct net_device *dev); + void (*uninit)(struct net_device *dev); + int (*open)(struct net_device *dev); + int (*stop)(struct net_device *dev); + int (*hard_start_xmit) (struct sk_buff *skb, + struct net_device *dev); + u16 (*select_queue)(struct net_device *dev, + struct sk_buff *skb); + void (*change_rx_flags)(struct net_device *dev, + int flags); + void (*set_rx_mode)(struct net_device *dev); + void (*set_multicast_list)(struct net_device *dev); + int (*set_mac_address)(struct net_device *dev, + void *addr); + int (*validate_addr)(struct net_device *dev); + int (*do_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*set_config)(struct net_device *dev, + struct ifmap *map); + int (*change_mtu)(struct net_device *dev, int new_mtu); + int (*neigh_setup)(struct net_device *dev, + struct neigh_parms *); + void (*tx_timeout) (struct net_device *dev); + struct net_device_stats* (*get_stats)(struct net_device *dev); + void (*vlan_rx_register)(struct net_device *dev, + struct vlan_group *grp); + void (*vlan_rx_add_vid)(struct net_device *dev, + unsigned short vid); + void (*vlan_rx_kill_vid)(struct net_device *dev, + unsigned short vid); +#ifdef CONFIG_NET_POLL_CONTROLLER + void (*poll_controller)(struct net_device *dev); +#endif + }; +#endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -859,22 +971,8 @@ static inline void *netdev_priv(const struct net_device *dev) * netif_napi_add() must be used to initialize a napi context prior to calling * *any* of the other napi related functions. 
*/ -static inline void netif_napi_add(struct net_device *dev, - struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), - int weight) -{ - INIT_LIST_HEAD(&napi->poll_list); - napi->poll = poll; - napi->weight = weight; -#ifdef CONFIG_NETPOLL - napi->dev = dev; - list_add(&napi->dev_list, &dev->napi_list); - spin_lock_init(&napi->poll_lock); - napi->poll_owner = -1; -#endif - set_bit(NAPI_STATE_SCHED, &napi->state); -} +void netif_napi_add(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight); /** * netif_napi_del - remove a napi context @@ -882,12 +980,23 @@ static inline void netif_napi_add(struct net_device *dev, * * netif_napi_del() removes a napi context from the network device napi list */ -static inline void netif_napi_del(struct napi_struct *napi) -{ -#ifdef CONFIG_NETPOLL - list_del(&napi->dev_list); -#endif -} +void netif_napi_del(struct napi_struct *napi); + +struct napi_gro_cb { + /* This is non-zero if the packet may be of the same flow. */ + int same_flow; + + /* This is non-zero if the packet cannot be merged with the new skb. */ + int flush; + + /* Number of segments aggregated. */ + int count; + + /* Free the skb? */ + int free; +}; + +#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) struct packet_type { __be16 type; /* This is really htons(ether_type). */ @@ -899,10 +1008,21 @@ struct packet_type { struct sk_buff *(*gso_segment)(struct sk_buff *skb, int features); int (*gso_send_check)(struct sk_buff *skb); + struct sk_buff **(*gro_receive)(struct sk_buff **head, + struct sk_buff *skb); + int (*gro_complete)(struct sk_buff *skb); void *af_packet_priv; struct list_head list; }; +struct napi_gro_fraginfo { + skb_frag_t frags[MAX_SKB_FRAGS]; + unsigned int nr_frags; + unsigned int ip_summed; + unsigned int len; + __wsum csum; +}; + #include <linux/interrupt.h> #include <linux/notifier.h> @@ -1252,6 +1372,17 @@ extern int netif_rx(struct sk_buff *skb); extern int netif_rx_ni(struct sk_buff *skb); #define HAVE_NETIF_RECEIVE_SKB 1 extern int netif_receive_skb(struct sk_buff *skb); +extern void napi_gro_flush(struct napi_struct *napi); +extern int dev_gro_receive(struct napi_struct *napi, + struct sk_buff *skb); +extern int napi_gro_receive(struct napi_struct *napi, + struct sk_buff *skb); +extern void napi_reuse_skb(struct napi_struct *napi, + struct sk_buff *skb); +extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, + struct napi_gro_fraginfo *info); +extern int napi_gro_frags(struct napi_struct *napi, + struct napi_gro_fraginfo *info); extern void netif_nit_deliver(struct sk_buff *skb); extern int dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); @@ -1444,8 +1575,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) } /* Test if receive needs to be scheduled but only if up */ -static inline int netif_rx_schedule_prep(struct net_device *dev, - struct napi_struct *napi) +static inline int netif_rx_schedule_prep(struct napi_struct *napi) { return napi_schedule_prep(napi); } @@ -1453,27 +1583,24 @@ static inline int netif_rx_schedule_prep(struct net_device *dev, /* Add interface to tail of rx poll list. This assumes that _prep has * already been called and returned 1. */ -static inline void __netif_rx_schedule(struct net_device *dev, - struct napi_struct *napi) +static inline void __netif_rx_schedule(struct napi_struct *napi) { __napi_schedule(napi); } /* Try to reschedule poll. Called by irq handler. 
*/ -static inline void netif_rx_schedule(struct net_device *dev, - struct napi_struct *napi) +static inline void netif_rx_schedule(struct napi_struct *napi) { - if (netif_rx_schedule_prep(dev, napi)) - __netif_rx_schedule(dev, napi); + if (netif_rx_schedule_prep(napi)) + __netif_rx_schedule(napi); } /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ -static inline int netif_rx_reschedule(struct net_device *dev, - struct napi_struct *napi) +static inline int netif_rx_reschedule(struct napi_struct *napi) { if (napi_schedule_prep(napi)) { - __netif_rx_schedule(dev, napi); + __netif_rx_schedule(napi); return 1; } return 0; @@ -1482,8 +1609,7 @@ static inline int netif_rx_reschedule(struct net_device *dev, /* same as netif_rx_complete, except that local_irq_save(flags) * has already been issued */ -static inline void __netif_rx_complete(struct net_device *dev, - struct napi_struct *napi) +static inline void __netif_rx_complete(struct napi_struct *napi) { __napi_complete(napi); } @@ -1493,20 +1619,9 @@ static inline void __netif_rx_complete(struct net_device *dev, * it completes the work. The device cannot be out of poll list at this * moment, it is BUG(). */ -static inline void netif_rx_complete(struct net_device *dev, - struct napi_struct *napi) +static inline void netif_rx_complete(struct napi_struct *napi) { - unsigned long flags; - - /* - * don't let napi dequeue from the cpu poll list - * just in case its running on a different cpu - */ - if (unlikely(test_bit(NAPI_STATE_NPSVC, &napi->state))) - return; - local_irq_save(flags); - __netif_rx_complete(dev, napi); - local_irq_restore(flags); + napi_complete(napi); } static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) @@ -1683,6 +1798,8 @@ extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); +extern const struct net_device_stats *dev_get_stats(struct net_device *dev); + extern int netdev_max_backlog; extern int weight_p; extern int netdev_set_master(struct net_device *dev, struct net_device *master); @@ -1731,6 +1848,8 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || + (skb_shinfo(skb)->frag_list && + !(dev->features & NETIF_F_FRAGLIST)) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } @@ -1749,26 +1868,31 @@ static inline int skb_bond_should_drop(struct sk_buff *skb) struct net_device *dev = skb->dev; struct net_device *master = dev->master; - if (master && - (dev->priv_flags & IFF_SLAVE_INACTIVE)) { - if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && - skb->protocol == __constant_htons(ETH_P_ARP)) - return 0; + if (master) { + if (master->priv_flags & IFF_MASTER_ARPMON) + dev->last_rx = jiffies; - if (master->priv_flags & IFF_MASTER_ALB) { - if (skb->pkt_type != PACKET_BROADCAST && - skb->pkt_type != PACKET_MULTICAST) + if (dev->priv_flags & IFF_SLAVE_INACTIVE) { + if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && + skb->protocol == __constant_htons(ETH_P_ARP)) return 0; - } - if (master->priv_flags & IFF_MASTER_8023AD && - skb->protocol == __constant_htons(ETH_P_SLOW)) - return 0; - return 1; + if (master->priv_flags & IFF_MASTER_ALB) { + if (skb->pkt_type != PACKET_BROADCAST && + skb->pkt_type != PACKET_MULTICAST) + return 0; + } + if (master->priv_flags & IFF_MASTER_8023AD && + skb->protocol == __constant_htons(ETH_P_SLOW)) + return 0; + + return 1; + } } return 0; } +extern 
struct pernet_operations __net_initdata loopback_net_ops; #endif /* __KERNEL__ */ #endif /* _LINUX_DEV_H */ diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index d45e29cd1cf..e40ddb94b1a 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -300,7 +300,8 @@ struct ebt_table #define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \ ~(__alignof__(struct ebt_replace)-1)) -extern int ebt_register_table(struct ebt_table *table); +extern struct ebt_table *ebt_register_table(struct net *net, + struct ebt_table *table); extern void ebt_unregister_table(struct ebt_table *table); extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h index b9478a25530..1037fb2cd20 100644 --- a/include/linux/netfilter_ipv4/ipt_policy.h +++ b/include/linux/netfilter_ipv4/ipt_policy.h @@ -1,6 +1,8 @@ #ifndef _IPT_POLICY_H #define _IPT_POLICY_H +#include <linux/netfilter/xt_policy.h> + #define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM /* ipt_policy_flags */ diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h index 6bab3163d2f..b1c449d7ec8 100644 --- a/include/linux/netfilter_ipv6/ip6t_policy.h +++ b/include/linux/netfilter_ipv6/ip6t_policy.h @@ -1,6 +1,8 @@ #ifndef _IP6T_POLICY_H #define _IP6T_POLICY_H +#include <linux/netfilter/xt_policy.h> + #define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM /* ip6t_policy_flags */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 9ff1b54908f..51b09a1f46c 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -242,7 +242,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) nlh->nlmsg_flags = flags; nlh->nlmsg_pid = pid; nlh->nlmsg_seq = seq; - memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); + if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0) + memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); return nlh; } diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e3d79593fb3..e38d3c9dccd 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -94,11 +94,6 @@ static inline void netpoll_poll_unlock(void *have) rcu_read_unlock(); } -static inline void netpoll_netdev_init(struct net_device *dev) -{ - INIT_LIST_HEAD(&dev->napi_list); -} - #else static inline int netpoll_rx(struct sk_buff *skb) { diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index ea036676948..b912311a56b 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -88,6 +88,8 @@ #define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 #define NFS4_ACE_MASK_ALL 0x001F01FF +#define NFS4_MAX_UINT64 (~(u64)0) + enum nfs4_acl_whotype { NFS4_ACL_WHO_NAMED = 0, NFS4_ACL_WHO_OWNER, diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 4eaa8347a0d..db867b04ac3 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -83,7 +83,7 @@ struct nfs_open_context { struct rpc_cred *cred; struct nfs4_state *state; fl_owner_t lockowner; - int mode; + fmode_t mode; unsigned long flags; #define NFS_CONTEXT_ERROR_WRITE (0) @@ -130,7 +130,10 @@ struct nfs_inode { * * We need to revalidate the cached attrs for this inode if * - * jiffies - read_cache_jiffies > attrtimeo + * jiffies - read_cache_jiffies >= attrtimeo + * + * Please note the comparison is greater than or 
equal + * so that zero timeout values can be specified. */ unsigned long read_cache_jiffies; unsigned long attrtimeo; @@ -180,7 +183,7 @@ struct nfs_inode { /* NFSv4 state */ struct list_head open_states; struct nfs_delegation *delegation; - int delegation_state; + fmode_t delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ struct inode vfs_inode; @@ -342,7 +345,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); -extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); @@ -533,12 +536,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) #endif /* CONFIG_NFS_V3_ACL */ /* - * linux/fs/mount_clnt.c - */ -extern int nfs_mount(struct sockaddr *, size_t, char *, char *, - int, int, struct nfs_fh *); - -/* * inline functions */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 4e477ae5869..9bb81aec91c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -42,12 +42,6 @@ struct nfs_client { struct rb_root cl_openowner_id; struct rb_root cl_lockowner_id; - /* - * The following rwsem ensures exclusive access to the server - * while we recover the state following a lease expiration. - */ - struct rw_semaphore cl_sem; - struct list_head cl_delegations; struct rb_root cl_state_owners; spinlock_t cl_lock; diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 6549a06ac16..4499016e6d0 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -45,7 +45,7 @@ struct nfs_mount_data { char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ }; -/* bits in the flags field */ +/* bits in the flags field visible to user space */ #define NFS_MOUNT_SOFT 0x0001 /* 1 */ #define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ @@ -68,5 +68,6 @@ struct nfs_mount_data { /* The following are for internal use only */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 +#define NFS_MOUNT_NORESVPORT 0x40000 #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c1c31acb8a2..a550b528319 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -120,13 +120,14 @@ struct nfs_openargs { const struct nfs_fh * fh; struct nfs_seqid * seqid; int open_flags; + fmode_t fmode; __u64 clientid; __u64 id; union { struct iattr * attrs; /* UNCHECKED, GUARDED */ nfs4_verifier verifier; /* EXCLUSIVE */ nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ - int delegation_type; /* CLAIM_PREVIOUS */ + fmode_t delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ @@ -143,7 +144,7 @@ struct nfs_openres { struct nfs_fattr * dir_attr; struct nfs_seqid * seqid; const struct nfs_server *server; - int delegation_type; + fmode_t delegation_type; nfs4_stateid delegation; __u32 do_recall; __u64 maxsize; @@ -171,7 +172,7 @@ struct nfs_closeargs { struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; - int open_flags; + fmode_t fmode; const u32 * bitmask; }; diff --git a/include/linux/nfsd/nfsd.h 
b/include/linux/nfsd/nfsd.h index 21269405ffe..e19f45991b2 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -23,7 +23,6 @@ /* * nfsd version */ -#define NFSD_VERSION "0.5" #define NFSD_SUPPORTED_MINOR_VERSION 0 /* diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index d1941cb965e..b2e093870bc 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -68,6 +68,10 @@ struct nfs_fhbase_old { * 1 - 4 byte user specified identifier * 2 - 4 byte major, 4 byte minor, 4 byte inode number - DEPRECATED * 3 - 4 byte device id, encoded for user-space, 4 byte inode number + * 4 - 4 byte inode number and 4 byte uuid + * 5 - 8 byte uuid + * 6 - 16 byte uuid + * 7 - 8 byte inode number and 16 byte uuid * * The fileid_type identified how the file within the filesystem is encoded. * This is (will be) passed to, and set by, the underlying filesystem if it supports diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index d0fe2e37845..128298c0362 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -124,6 +124,8 @@ struct nfs4_client { nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ __be32 cl_addr; /* client ipaddress */ + u32 cl_flavor; /* setclientid pseudoflavor */ + char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 9bad65400fb..e86ed59f9ad 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -3,7 +3,26 @@ /* * 802.11 netlink interface public header * - * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2006, 2007, 2008 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2008 Michael Wu <flamingice@sourmilk.net> + * Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com> + * Copyright 2008 Michael Buesch <mb@bu3sch.de> + * Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com> + * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> + * Copyright 2008 Colin McCabe <colin@cozybit.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * */ /** @@ -25,8 +44,10 @@ * * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request * to get a list of all present wiphys. - * @NL80211_CMD_SET_WIPHY: set wiphy name, needs %NL80211_ATTR_WIPHY and - * %NL80211_ATTR_WIPHY_NAME. + * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or + * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, + * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or + * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET. 
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request * or rename notification. Has attributes %NL80211_ATTR_WIPHY and * %NL80211_ATTR_WIPHY_NAME. @@ -106,6 +127,12 @@ * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will * store this as a valid request and then query userspace for it. * + * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * + * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the + * interface identified by %NL80211_ATTR_IFINDEX + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -148,6 +175,9 @@ enum nl80211_commands { NL80211_CMD_SET_REG, NL80211_CMD_REQ_SET_REG, + NL80211_CMD_GET_MESH_PARAMS, + NL80211_CMD_SET_MESH_PARAMS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -169,6 +199,15 @@ enum nl80211_commands { * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. * /sys/class/ieee80211/<phyname>/index * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) + * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters + * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz + * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ + * if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included): + * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including + * this attribute) + * NL80211_CHAN_HT20 = HT20 only + * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel + * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel * * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on * @NL80211_ATTR_IFNAME: network interface name @@ -234,6 +273,9 @@ enum nl80211_commands { * (u8, 0 or 1) * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled * (u8, 0 or 1) + * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic + * rates in format defined by IEEE 802.11 7.3.2.2 but without the length + * restriction (at most %NL80211_MAX_SUPP_RATES). * * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from * association request when used with NL80211_CMD_NEW_STATION) @@ -296,6 +338,14 @@ enum nl80211_attrs { NL80211_ATTR_REG_ALPHA2, NL80211_ATTR_REG_RULES, + NL80211_ATTR_MESH_PARAMS, + + NL80211_ATTR_BSS_BASIC_RATES, + + NL80211_ATTR_WIPHY_TXQ_PARAMS, + NL80211_ATTR_WIPHY_FREQ, + NL80211_ATTR_WIPHY_CHANNEL_TYPE, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -307,6 +357,10 @@ enum nl80211_attrs { * here */ #define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY +#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES +#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS +#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ +#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 @@ -371,6 +425,32 @@ enum nl80211_sta_flags { }; /** + * enum nl80211_rate_info - bitrate information + * + * These attribute types are used with %NL80211_STA_INFO_TXRATE + * when getting information about the bitrate of a station. 
+ * + * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved + * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s) + * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8) + * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 Mhz dualchannel bitrate + * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval + * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined + * @__NL80211_RATE_INFO_AFTER_LAST: internal use + */ +enum nl80211_rate_info { + __NL80211_RATE_INFO_INVALID, + NL80211_RATE_INFO_BITRATE, + NL80211_RATE_INFO_MCS, + NL80211_RATE_INFO_40_MHZ_WIDTH, + NL80211_RATE_INFO_SHORT_GI, + + /* keep last */ + __NL80211_RATE_INFO_AFTER_LAST, + NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1 +}; + +/** * enum nl80211_sta_info - station information * * These attribute types are used with %NL80211_ATTR_STA_INFO @@ -382,6 +462,9 @@ enum nl80211_sta_flags { * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute + * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) + * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute + * containing info as possible, see &enum nl80211_sta_info_txrate. */ enum nl80211_sta_info { __NL80211_STA_INFO_INVALID, @@ -391,6 +474,8 @@ enum nl80211_sta_info { NL80211_STA_INFO_LLID, NL80211_STA_INFO_PLID, NL80211_STA_INFO_PLINK_STATE, + NL80211_STA_INFO_SIGNAL, + NL80211_STA_INFO_TX_BITRATE, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -452,17 +537,29 @@ enum nl80211_mpath_info { * an array of nested frequency attributes * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, * an array of nested bitrate attributes + * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as + * defined in 802.11n + * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE + * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n + * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n */ enum nl80211_band_attr { __NL80211_BAND_ATTR_INVALID, NL80211_BAND_ATTR_FREQS, NL80211_BAND_ATTR_RATES, + NL80211_BAND_ATTR_HT_MCS_SET, + NL80211_BAND_ATTR_HT_CAPA, + NL80211_BAND_ATTR_HT_AMPDU_FACTOR, + NL80211_BAND_ATTR_HT_AMPDU_DENSITY, + /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 }; +#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA + /** * enum nl80211_frequency_attr - frequency attributes * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz @@ -474,6 +571,8 @@ enum nl80211_band_attr { * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm + * (100 * dBm). 
*/ enum nl80211_frequency_attr { __NL80211_FREQUENCY_ATTR_INVALID, @@ -482,12 +581,15 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_PASSIVE_SCAN, NL80211_FREQUENCY_ATTR_NO_IBSS, NL80211_FREQUENCY_ATTR_RADAR, + NL80211_FREQUENCY_ATTR_MAX_TX_POWER, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1 }; +#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER + /** * enum nl80211_bitrate_attr - bitrate attributes * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps @@ -594,4 +696,119 @@ enum nl80211_mntr_flags { NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1 }; +/** + * enum nl80211_meshconf_params - mesh configuration parameters + * + * Mesh configuration parameters + * + * @__NL80211_MESHCONF_INVALID: internal use + * + * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in + * millisecond units, used by the Peer Link Open message + * + * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the inital confirm timeout, in + * millisecond units, used by the peer link management to close a peer link + * + * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in + * millisecond units + * + * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed + * on this mesh interface + * + * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link + * open retries that can be sent to establish a new peer link instance in a + * mesh + * + * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh + * point. + * + * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically + * open peer links when we detect compatible mesh peers. + * + * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames + * containing a PREQ that an MP can send to a particular destination (path + * target) + * + * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths + * (in milliseconds) + * + * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait + * until giving up on a path discovery (in milliseconds) + * + * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh + * points receiving a PREQ shall consider the forwarding information from the + * root to be valid. 
(TU = time unit) + * + * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in + * TUs) during which an MP can send only one action frame containing a PREQ + * reference element + * + * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs) + * that it takes for an HWMP information element to propagate across the mesh + * + * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute + * + * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use + */ +enum nl80211_meshconf_params { + __NL80211_MESHCONF_INVALID, + NL80211_MESHCONF_RETRY_TIMEOUT, + NL80211_MESHCONF_CONFIRM_TIMEOUT, + NL80211_MESHCONF_HOLDING_TIMEOUT, + NL80211_MESHCONF_MAX_PEER_LINKS, + NL80211_MESHCONF_MAX_RETRIES, + NL80211_MESHCONF_TTL, + NL80211_MESHCONF_AUTO_OPEN_PLINKS, + NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, + NL80211_MESHCONF_PATH_REFRESH_TIME, + NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, + NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, + NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, + NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, + + /* keep last */ + __NL80211_MESHCONF_ATTR_AFTER_LAST, + NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1 +}; + +/** + * enum nl80211_txq_attr - TX queue parameter attributes + * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved + * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*) + * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning + * disabled + * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form + * 2^n-1 in the range 1..32767] + * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form + * 2^n-1 in the range 1..32767] + * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255] + * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal + * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number + */ +enum nl80211_txq_attr { + __NL80211_TXQ_ATTR_INVALID, + NL80211_TXQ_ATTR_QUEUE, + NL80211_TXQ_ATTR_TXOP, + NL80211_TXQ_ATTR_CWMIN, + NL80211_TXQ_ATTR_CWMAX, + NL80211_TXQ_ATTR_AIFS, + + /* keep last */ + __NL80211_TXQ_ATTR_AFTER_LAST, + NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1 +}; + +enum nl80211_txq_q { + NL80211_TXQ_Q_VO, + NL80211_TXQ_Q_VI, + NL80211_TXQ_Q_BE, + NL80211_TXQ_Q_BK +}; + +enum nl80211_channel_type { + NL80211_CHAN_NO_HT, + NL80211_CHAN_HT20, + NL80211_CHAN_HT40MINUS, + NL80211_CHAN_HT40PLUS +}; #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/node.h b/include/linux/node.h index bc001bc225c..681a697b9a8 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -26,6 +26,7 @@ struct node { struct sys_device sysdev; }; +struct memory_block; extern struct node node_devices[]; extern int register_node(struct node *, int, struct node *); @@ -35,6 +36,9 @@ extern int register_one_node(int nid); extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); +extern int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid); +extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); #else static inline int register_one_node(int nid) { @@ -52,6 +56,15 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { return 0; } +static inline int register_mem_sect_under_node(struct memory_block *mem_blk, + int nid) +{ + return 0; +} +static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) +{ + return 0; +} #endif #define 
to_node(sys_device) container_of(sys_device, struct node, sysdev) diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index c8a768e5964..afad7dec1b3 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -27,7 +27,6 @@ struct nsproxy { struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns; - struct user_namespace *user_ns; struct net *net_ns; }; extern struct nsproxy init_nsproxy; diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h new file mode 100644 index 00000000000..9acb21572ea --- /dev/null +++ b/include/linux/nwpserial.h @@ -0,0 +1,18 @@ +/* + * Serial Port driver for a NWP uart device + * + * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ +#ifndef _NWPSERIAL_H +#define _NWPSERIAL_H + +int nwpserial_register_port(struct uart_port *port); +void nwpserial_unregister_port(int line); + +#endif /* _NWPSERIAL_H */ diff --git a/include/linux/of.h b/include/linux/of.h index e2488f5e7cb..6a7efa242f5 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -57,6 +57,12 @@ extern struct device_node *of_get_next_child(const struct device_node *node, for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) +extern struct device_node *of_find_node_with_property( + struct device_node *from, const char *prop_name); +#define for_each_node_with_property(dn, prop_name) \ + for (dn = of_find_node_with_property(NULL, prop_name); dn; \ + dn = of_find_node_with_property(dn, prop_name)) + extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 67db101d0eb..fc2472c3c25 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -14,9 +14,22 @@ #ifndef __LINUX_OF_GPIO_H #define __LINUX_OF_GPIO_H +#include <linux/compiler.h> +#include <linux/kernel.h> #include <linux/errno.h> #include <linux/gpio.h> +struct device_node; + +/* + * This is Linux-specific flags. By default controllers' and Linux' mapping + * match, but GPIO controllers are free to translate their own flags to + * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended. 
+ */ +enum of_gpio_flags { + OF_GPIO_ACTIVE_LOW = 0x1, +}; + #ifdef CONFIG_OF_GPIO /* @@ -26,7 +39,7 @@ struct of_gpio_chip { struct gpio_chip gc; int gpio_cells; int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np, - const void *gpio_spec); + const void *gpio_spec, enum of_gpio_flags *flags); }; static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc) @@ -50,20 +63,43 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(of_gc, struct of_mm_gpio_chip, of_gc); } -extern int of_get_gpio(struct device_node *np, int index); +extern int of_get_gpio_flags(struct device_node *np, int index, + enum of_gpio_flags *flags); +extern unsigned int of_gpio_count(struct device_node *np); + extern int of_mm_gpiochip_add(struct device_node *np, struct of_mm_gpio_chip *mm_gc); extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, - const void *gpio_spec); + const void *gpio_spec, + enum of_gpio_flags *flags); #else /* Drivers may not strictly depend on the GPIO support, so let them link. */ -static inline int of_get_gpio(struct device_node *np, int index) +static inline int of_get_gpio_flags(struct device_node *np, int index, + enum of_gpio_flags *flags) { return -ENOSYS; } +static inline unsigned int of_gpio_count(struct device_node *np) +{ + return 0; +} + #endif /* CONFIG_OF_GPIO */ +/** + * of_get_gpio - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. + */ +static inline int of_get_gpio(struct device_node *np, int index) +{ + return of_get_gpio_flags(np, index, NULL); +} + #endif /* __LINUX_OF_GPIO_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index a8efcfeea73..3d327b67d7e 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type; /* * An of_platform_driver driver is attached to a basic of_device on - * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS - * busses on sparc). + * the "platform bus" (of_platform_bus_type). */ struct of_platform_driver { diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 5231861f357..1ce9fe572e5 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -86,8 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops); void oprofile_arch_exit(void); /** - * Add a sample. This may be called from any context. Pass - * smp_processor_id() as cpu. + * Add a sample. This may be called from any context. 
*/ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h new file mode 100644 index 00000000000..0bf96eae538 --- /dev/null +++ b/include/linux/oxu210hp.h @@ -0,0 +1,7 @@ +/* platform data for the OXU210HP HCD */ + +struct oxu210hp_platform_data { + unsigned int bus16:1; + unsigned int use_hcd_otg:1; + unsigned int use_hcd_sph:1; +}; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index b12f93a3c34..219a523ecdb 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -228,6 +228,7 @@ PAGEFLAG_FALSE(HighMem) PAGEFLAG(SwapCache, swapcache) #else PAGEFLAG_FALSE(SwapCache) + SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) #endif #ifdef CONFIG_UNEVICTABLE_LRU @@ -372,31 +373,22 @@ static inline void __ClearPageTail(struct page *page) #define __PG_MLOCKED 0 #endif -#define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ - 1 << PG_buddy | 1 << PG_writeback | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - __PG_UNEVICTABLE | __PG_MLOCKED) - -/* - * Flags checked in bad_page(). Pages on the free list should not have - * these flags set. It they are, there is a problem. - */ -#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \ - 1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked) - /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. It they are, there is a problem. */ -#define PAGE_FLAGS_CHECK_AT_FREE (PAGE_FLAGS | 1 << PG_reserved) +#define PAGE_FLAGS_CHECK_AT_FREE \ + (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ + 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ + 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ + __PG_UNEVICTABLE | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have these flags set. It they are, there - * is a problem. + * Pages being prepped should not have any flags set. It they are set, + * there has been a kernel bug or struct page corruption. */ -#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \ - 1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked) +#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 1e6d34bfa09..602cc1fdee9 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -26,10 +26,6 @@ enum { PCG_LOCK, /* page cgroup is locked */ PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. 
*/ - /* flags for LRU placement */ - PCG_ACTIVE, /* page is active in this cgroup */ - PCG_FILE, /* page is file system backed */ - PCG_UNEVICTABLE, /* page is unevictableable */ }; #define TESTPCGFLAG(uname, lname) \ @@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE) TESTPCGFLAG(Used, USED) CLEARPCGFLAG(Used, USED) -/* LRU management flags (from global-lru definition) */ -TESTPCGFLAG(File, FILE) -SETPCGFLAG(File, FILE) -CLEARPCGFLAG(File, FILE) - -TESTPCGFLAG(Active, ACTIVE) -SETPCGFLAG(Active, ACTIVE) -CLEARPCGFLAG(Active, ACTIVE) - -TESTPCGFLAG(Unevictable, UNEVICTABLE) -SETPCGFLAG(Unevictable, UNEVICTABLE) -CLEARPCGFLAG(Unevictable, UNEVICTABLE) - static inline int page_cgroup_nid(struct page_cgroup *pc) { return page_to_nid(pc->page); @@ -105,4 +88,39 @@ static inline void page_cgroup_init(void) } #endif + +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +#include <linux/swap.h> +extern struct mem_cgroup * +swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); +extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); +#else +#include <linux/swap.h> + +static inline +struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) +{ + return NULL; +} + +static inline +struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) +{ + return NULL; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif #endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 709742be02f..01ca0856caf 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -241,7 +241,8 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages); -struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index); +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags); /* * Returns locked page at given index in given cache, creating it if needed. 
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index e90a2cb0291..7b2886fa7fd 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -21,7 +21,6 @@ struct pagevec { }; void __pagevec_release(struct pagevec *pvec); -void __pagevec_release_nonlru(struct pagevec *pvec); void __pagevec_free(struct pagevec *pvec); void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); void pagevec_strip(struct pagevec *pvec); @@ -69,12 +68,6 @@ static inline void pagevec_release(struct pagevec *pvec) __pagevec_release(pvec); } -static inline void pagevec_release_nonlru(struct pagevec *pvec) -{ - if (pagevec_count(pvec)) - __pagevec_release_nonlru(pvec); -} - static inline void pagevec_free(struct pagevec *pvec) { if (pagevec_count(pvec)) diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 8837928fbf3..042c166f65d 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -8,6 +8,8 @@ #ifndef _PCI_ACPI_H_ #define _PCI_ACPI_H_ +#include <linux/acpi.h> + #define OSC_QUERY_TYPE 0 #define OSC_SUPPORT_TYPE 1 #define OSC_CONTROL_TYPE 2 @@ -48,15 +50,7 @@ #ifdef CONFIG_ACPI extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags); -extern acpi_status __pci_osc_support_set(u32 flags, const char *hid); -static inline acpi_status pci_osc_support_set(u32 flags) -{ - return __pci_osc_support_set(flags, PCI_ROOT_HID_STRING); -} -static inline acpi_status pcie_osc_support_set(u32 flags) -{ - return __pci_osc_support_set(flags, PCI_EXPRESS_ROOT_HID_STRING); -} +int pci_acpi_osc_support(acpi_handle handle, u32 flags); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { /* Find root host bridge */ @@ -66,6 +60,15 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), pdev->bus->number); } + +static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) +{ + int seg = pci_domain_nr(pbus), busnr = pbus->number; + struct pci_dev *bridge = pbus->self; + if (bridge) + return DEVICE_ACPI_HANDLE(&(bridge->dev)); + return acpi_get_pci_rootbridge_handle(seg, busnr); +} #else #if !defined(AE_ERROR) typedef u32 acpi_status; @@ -73,8 +76,6 @@ typedef u32 acpi_status; #endif static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) {return AE_ERROR;} -static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;} -static inline acpi_status pcie_osc_support_set(u32 flags) {return AE_ERROR;} static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { return NULL; } #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index feb4657bb04..80f8b8b65fd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -82,7 +82,30 @@ enum pci_mmap_state { #define PCI_DMA_FROMDEVICE 2 #define PCI_DMA_NONE 3 -#define DEVICE_COUNT_RESOURCE 12 +/* + * For PCI devices, the region numbers are assigned this way: + */ +enum { + /* #0-5: standard PCI resources */ + PCI_STD_RESOURCES, + PCI_STD_RESOURCE_END = 5, + + /* #6: expansion ROM resource */ + PCI_ROM_RESOURCE, + + /* resources assigned to buses behind the bridge */ +#define PCI_BRIDGE_RESOURCE_NUM 4 + + PCI_BRIDGE_RESOURCES, + PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + + PCI_BRIDGE_RESOURCE_NUM - 1, + + /* total resources associated with a PCI device */ + PCI_NUM_RESOURCES, + + /* preserve this for compatibility */ + DEVICE_COUNT_RESOURCE +}; typedef int __bitwise pci_power_t; @@ -134,6 +157,11 @@ enum pci_dev_flags { 
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, }; +enum pci_irq_reroute_variant { + INTEL_IRQ_REROUTE_VARIANT = 1, + MAX_IRQ_REROUTE_VARIANTS = 3 +}; + typedef unsigned short __bitwise pci_bus_flags_t; enum pci_bus_flags { PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, @@ -218,6 +246,7 @@ struct pci_dev { unsigned int no_msi:1; /* device may not use msi */ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ unsigned int msi_enabled:1; unsigned int msix_enabled:1; unsigned int ari_enabled:1; /* ARI forwarding */ @@ -268,18 +297,6 @@ static inline void pci_add_saved_cap(struct pci_dev *pci_dev, hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); } -/* - * For PCI devices, the region numbers are assigned this way: - * - * 0-5 standard PCI regions - * 6 expansion ROM - * 7-10 bridges: address space assigned to buses behind the bridge - */ - -#define PCI_ROM_RESOURCE 6 -#define PCI_BRIDGE_RESOURCES 7 -#define PCI_NUM_RESOURCES 11 - #ifndef PCI_BUS_NUM_RESOURCES #define PCI_BUS_NUM_RESOURCES 16 #endif @@ -319,6 +336,15 @@ struct pci_bus { #define pci_bus_b(n) list_entry(n, struct pci_bus, node) #define to_pci_bus(n) container_of(n, struct pci_bus, dev) +#ifdef CONFIG_PCI_MSI +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) +{ + return pci_dev->msi_enabled || pci_dev->msix_enabled; +} +#else +static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } +#endif + /* * Error values that may be returned by PCI functions. */ @@ -415,7 +441,6 @@ struct pci_driver { int (*resume_early) (struct pci_dev *dev); int (*resume) (struct pci_dev *dev); /* Device woken up */ void (*shutdown) (struct pci_dev *dev); - struct pm_ext_ops *pm; struct pci_error_handlers *err_handler; struct device_driver driver; struct pci_dynids dynids; @@ -527,7 +552,9 @@ int __must_check pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); +u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); extern struct pci_dev *pci_dev_get(struct pci_dev *dev); extern void pci_dev_put(struct pci_dev *dev); extern void pci_remove_bus(struct pci_bus *b); @@ -624,6 +651,7 @@ static inline int pci_is_managed(struct pci_dev *pdev) void pci_disable_device(struct pci_dev *dev); void pci_set_master(struct pci_dev *dev); +void pci_clear_master(struct pci_dev *dev); int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); #define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); @@ -642,7 +670,7 @@ int pcie_get_readrq(struct pci_dev *dev); int pcie_set_readrq(struct pci_dev *dev, int rq); int pci_reset_function(struct pci_dev *dev); int pci_execute_reset_function(struct pci_dev *dev); -void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); +void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int pci_select_bars(struct pci_dev *dev, unsigned long flags); @@ -669,6 +697,11 @@ int pci_back_from_sleep(struct pci_dev *dev); /* Functions for PCI Hotplug drivers to use */ int pci_bus_find_capability(struct pci_bus *bus, 
unsigned int devfn, int cap); +/* Vital product data routines */ +ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); +ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); +int pci_vpd_truncate(struct pci_dev *dev, size_t size); + /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ void pci_bus_assign_resources(struct pci_bus *bus); void pci_bus_size_bridges(struct pci_bus *bus); @@ -681,10 +714,13 @@ void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), int (*)(struct pci_dev *, u8, u8)); #define HAVE_PCI_REQ_REGIONS 2 int __must_check pci_request_regions(struct pci_dev *, const char *); +int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); void pci_release_regions(struct pci_dev *); int __must_check pci_request_region(struct pci_dev *, int, const char *); +int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); void pci_release_region(struct pci_dev *, int); int pci_request_selected_regions(struct pci_dev *, int, const char *); +int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); void pci_release_selected_regions(struct pci_dev *, int); /* drivers/pci/bus.c */ @@ -774,6 +810,10 @@ static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) static inline void pci_restore_msi_state(struct pci_dev *dev) { } +static inline int pci_msi_enabled(void) +{ + return 0; +} #else extern int pci_enable_msi(struct pci_dev *dev); extern void pci_msi_shutdown(struct pci_dev *dev); @@ -784,6 +824,16 @@ extern void pci_msix_shutdown(struct pci_dev *dev); extern void pci_disable_msix(struct pci_dev *dev); extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); extern void pci_restore_msi_state(struct pci_dev *dev); +extern int pci_msi_enabled(void); +#endif + +#ifndef CONFIG_PCIEASPM +static inline int pcie_aspm_enabled(void) +{ + return 0; +} +#else +extern int pcie_aspm_enabled(void); #endif #ifdef CONFIG_HT_IRQ @@ -1135,20 +1185,9 @@ static inline void pci_mmcfg_early_init(void) { } static inline void pci_mmcfg_late_init(void) { } #endif -#ifdef CONFIG_HAS_IOMEM -static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) -{ - /* - * Make sure the BAR is actually a memory resource, not an IO resource - */ - if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { - WARN_ON(1); - return NULL; - } - return ioremap_nocache(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar)); -} -#endif +int pci_ext_cfg_avail(struct pci_dev *dev); + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index c2d1a7d1886..20998746518 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -227,6 +227,8 @@ extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, struct hotplug_params *hpp); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); int acpi_root_bridge(acpi_handle handle); +int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); +int acpi_pci_detect_ejectable(struct pci_bus *pbus); #endif #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1800f1d6e40..d543365518a 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1658,6 +1658,7 @@ #define PCI_VENDOR_ID_ROCKWELL 0x127A #define PCI_VENDOR_ID_ITE 0x1283 +#define PCI_DEVICE_ID_ITE_8172 0x8172 #define PCI_DEVICE_ID_ITE_8211 
0x8211 #define PCI_DEVICE_ID_ITE_8212 0x8212 #define PCI_DEVICE_ID_ITE_8213 0x8213 @@ -1766,6 +1767,7 @@ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 +#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 #define PCI_VENDOR_ID_RADISYS 0x1331 @@ -1795,6 +1797,7 @@ #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 #define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 +#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 #define PCI_VENDOR_ID_HYPERCOPE 0x1365 @@ -2304,6 +2307,10 @@ #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 #define PCI_DEVICE_ID_INTEL_PXH_1 0x032A #define PCI_DEVICE_ID_INTEL_PXHV 0x032C +#define PCI_DEVICE_ID_INTEL_80332_0 0x0330 +#define PCI_DEVICE_ID_INTEL_80332_1 0x0332 +#define PCI_DEVICE_ID_INTEL_80333_0 0x0370 +#define PCI_DEVICE_ID_INTEL_80333_1 0x0372 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 @@ -2376,6 +2383,7 @@ #define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 #define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 #define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab +#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac #define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 #define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 #define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index e5effd47ed7..027815b4635 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -210,6 +210,7 @@ #define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */ #define PCI_CAP_ID_EXP 0x10 /* PCI Express */ #define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ +#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */ #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ #define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */ #define PCI_CAP_SIZEOF 4 @@ -316,6 +317,17 @@ #define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */ #define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */ +/* PCI Advanced Feature registers */ + +#define PCI_AF_LENGTH 2 +#define PCI_AF_CAP 3 +#define PCI_AF_CAP_TP 0x01 +#define PCI_AF_CAP_FLR 0x02 +#define PCI_AF_CTRL 4 +#define PCI_AF_CTRL_FLR 0x01 +#define PCI_AF_STATUS 5 +#define PCI_AF_STATUS_TP 0x01 + /* PCI-X registers */ #define PCI_X_CMD 2 /* Modes & Features */ @@ -399,20 +411,70 @@ #define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */ #define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */ #define PCI_EXP_LNKCAP 12 /* Link Capabilities */ -#define PCI_EXP_LNKCAP_ASPMS 0xc00 /* ASPM Support */ -#define PCI_EXP_LNKCAP_L0SEL 0x7000 /* L0s Exit Latency */ -#define PCI_EXP_LNKCAP_L1EL 0x38000 /* L1 Exit Latency */ -#define PCI_EXP_LNKCAP_CLKPM 0x40000 /* L1 Clock Power Management */ +#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */ +#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ +#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */ +#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* L1 Clock Power Management */ +#define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Suprise Down Error Reporting Capable */ +#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */ +#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */ +#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */ #define PCI_EXP_LNKCTL 
16 /* Link Control */ -#define PCI_EXP_LNKCTL_RL 0x20 /* Retrain Link */ -#define PCI_EXP_LNKCTL_CCC 0x40 /* Common Clock COnfiguration */ +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */ +#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */ +#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */ +#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */ +#define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */ #define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */ +#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */ +#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */ +#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */ #define PCI_EXP_LNKSTA 18 /* Link Status */ -#define PCI_EXP_LNKSTA_LT 0x800 /* Link Training */ +#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ +#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Nogotiated Link Width */ +#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */ #define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */ +#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */ +#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */ +#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */ #define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */ +#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */ +#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */ +#define PCI_EXP_SLTCAP_AIP 0x00000008 /* Attention Indicator Present */ +#define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power Indicator Present */ +#define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */ +#define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */ +#define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */ +#define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */ +#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */ +#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */ +#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */ #define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */ +#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */ +#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */ +#define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */ +#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ +#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ +#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ +#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */ +#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */ +#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */ +#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */ #define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */ +#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */ +#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */ +#define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */ +#define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */ +#define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */ +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ 
+#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */ +#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */ #define PCI_EXP_RTCTL 28 /* Root Control */ #define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */ #define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */ diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 9007ccdfc11..a7684a51399 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -24,14 +24,18 @@ struct percpu_counter { s32 *counters; }; -#if NR_CPUS >= 16 -#define FBC_BATCH (NR_CPUS*2) -#else -#define FBC_BATCH (NR_CPUS*4) -#endif +extern int percpu_counter_batch; + +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, + struct lock_class_key *key); + +#define percpu_counter_init(fbc, value) \ + ({ \ + static struct lock_class_key __key; \ + \ + __percpu_counter_init(fbc, value, &__key); \ + }) -int percpu_counter_init(struct percpu_counter *fbc, s64 amount); -int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); @@ -39,7 +43,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc); static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { - __percpu_counter_add(fbc, amount, FBC_BATCH); + __percpu_counter_add(fbc, amount, percpu_counter_batch); } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) @@ -85,8 +89,6 @@ static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) return 0; } -#define percpu_counter_init_irq percpu_counter_init - static inline void percpu_counter_destroy(struct percpu_counter *fbc) { } diff --git a/include/linux/phy.h b/include/linux/phy.h index 77c4ed60b98..d7e54d98869 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -467,6 +467,8 @@ int genphy_restart_aneg(struct phy_device *phydev); int genphy_config_aneg(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); +int genphy_suspend(struct phy_device *phydev); +int genphy_resume(struct phy_device *phydev); void phy_driver_unregister(struct phy_driver *drv); int phy_driver_register(struct phy_driver *new_driver); void phy_prepare_link(struct phy_device *phydev, diff --git a/include/linux/pid.h b/include/linux/pid.h index d7e98ff8021..49f1c2f66e9 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -123,6 +123,24 @@ extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); /* + * ns_of_pid() returns the pid namespace in which the specified pid was + * allocated. + * + * NOTE: + * ns_of_pid() is expected to be called for a process (task) that has + * an attached 'struct pid' (see attach_pid(), detach_pid()) i.e @pid + * is expected to be non-NULL. If @pid is NULL, caller should handle + * the resulting NULL pid-ns. + */ +static inline struct pid_namespace *ns_of_pid(struct pid *pid) +{ + struct pid_namespace *ns = NULL; + if (pid) + ns = pid->numbers[pid->level].ns; + return ns; +} + +/* * the helpers to get the pid's id seen from different namespaces * * pid_nr() : global id, i.e. 
the id seen from the init namespace; @@ -147,9 +165,9 @@ pid_t pid_vnr(struct pid *pid); #define do_each_pid_task(pid, type, task) \ do { \ struct hlist_node *pos___; \ - if (pid != NULL) \ + if ((pid) != NULL) \ hlist_for_each_entry_rcu((task), pos___, \ - &pid->tasks[type], pids[type].node) { + &(pid)->tasks[type], pids[type].node) { /* * Both old and new leaders may be attached to diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index d82fe825d62..38d10326246 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -79,11 +79,7 @@ static inline void zap_pid_ns_processes(struct pid_namespace *ns) } #endif /* CONFIG_PID_NS */ -static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) -{ - return tsk->nsproxy->pid_ns; -} - +extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); void pidhash_init(void); void pidmap_init(void); diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index 7cf7824df77..e6aa8482ad7 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h @@ -394,6 +394,20 @@ enum #define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1) + +/* Cgroup classifier */ + +enum +{ + TCA_CGROUP_UNSPEC, + TCA_CGROUP_ACT, + TCA_CGROUP_POLICE, + TCA_CGROUP_EMATCHES, + __TCA_CGROUP_MAX, +}; + +#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1) + /* Extended Matches */ struct tcf_ematch_tree_hdr diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 5d921fa91a5..e3f133adba7 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -500,4 +500,20 @@ struct tc_netem_corrupt #define NETEM_DIST_SCALE 8192 +/* DRR */ + +enum +{ + TCA_DRR_UNSPEC, + TCA_DRR_QUANTUM, + __TCA_DRR_MAX +}; + +#define TCA_DRR_MAX (__TCA_DRR_MAX - 1) + +struct tc_drr_stats +{ + u32 deficit; +}; + #endif diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 4b8cc6a3247..9a342699c60 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -55,7 +55,6 @@ struct platform_driver { int (*suspend_late)(struct platform_device *, pm_message_t state); int (*resume_early)(struct platform_device *); int (*resume)(struct platform_device *); - struct pm_ext_ops *pm; struct device_driver driver; }; diff --git a/include/linux/pm.h b/include/linux/pm.h index 42de4003c4e..de2e0a8f672 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -41,7 +41,7 @@ typedef struct pm_message { } pm_message_t; /** - * struct pm_ops - device PM callbacks + * struct dev_pm_ops - device PM callbacks * * Several driver power state transitions are externally visible, affecting * the state of pending I/O queues and (for drivers that touch hardware) @@ -126,46 +126,6 @@ typedef struct pm_message { * On most platforms, there are no restrictions on availability of * resources like clocks during @restore(). * - * All of the above callbacks, except for @complete(), return error codes. - * However, the error codes returned by the resume operations, @resume(), - * @thaw(), and @restore(), do not cause the PM core to abort the resume - * transition during which they are returned. The error codes returned in - * that cases are only printed by the PM core to the system logs for debugging - * purposes. Still, it is recommended that drivers only return error codes - * from their resume methods in case of an unrecoverable failure (i.e. 
when the - * device being handled refuses to resume and becomes unusable) to allow us to - * modify the PM core in the future, so that it can avoid attempting to handle - * devices that failed to resume and their children. - * - * It is allowed to unregister devices while the above callbacks are being - * executed. However, it is not allowed to unregister a device from within any - * of its own callbacks. - */ - -struct pm_ops { - int (*prepare)(struct device *dev); - void (*complete)(struct device *dev); - int (*suspend)(struct device *dev); - int (*resume)(struct device *dev); - int (*freeze)(struct device *dev); - int (*thaw)(struct device *dev); - int (*poweroff)(struct device *dev); - int (*restore)(struct device *dev); -}; - -/** - * struct pm_ext_ops - extended device PM callbacks - * - * Some devices require certain operations related to suspend and hibernation - * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below - * is defined, adding callbacks to be executed with interrupts disabled to - * 'struct pm_ops'. - * - * The following callbacks included in 'struct pm_ext_ops' are executed with - * the nonboot CPUs switched off and with interrupts disabled on the only - * functional CPU. They also are executed with the PM core list of devices - * locked, so they must NOT unregister any devices. - * * @suspend_noirq: Complete the operations of ->suspend() by carrying out any * actions required for suspending the device that need interrupts to be * disabled @@ -190,18 +150,32 @@ struct pm_ops { * actions required for restoring the operations of the device that need * interrupts to be disabled * - * All of the above callbacks return error codes, but the error codes returned - * by the resume operations, @resume_noirq(), @thaw_noirq(), and - * @restore_noirq(), do not cause the PM core to abort the resume transition - * during which they are returned. The error codes returned in that cases are - * only printed by the PM core to the system logs for debugging purposes. - * Still, as stated above, it is recommended that drivers only return error - * codes from their resume methods if the device being handled fails to resume - * and is not usable any more. + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by the resume operations, @resume(), + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq() do + * not cause the PM core to abort the resume transition during which they are + * returned. The error codes returned in that cases are only printed by the PM + * core to the system logs for debugging purposes. Still, it is recommended + * that drivers only return error codes from their resume methods in case of an + * unrecoverable failure (i.e. when the device being handled refuses to resume + * and becomes unusable) to allow us to modify the PM core in the future, so + * that it can avoid attempting to handle devices that failed to resume and + * their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, it is not allowed to unregister a device from within any + * of its own callbacks. 
*/ -struct pm_ext_ops { - struct pm_ops base; +struct dev_pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); int (*suspend_noirq)(struct device *dev); int (*resume_noirq)(struct device *dev); int (*freeze_noirq)(struct device *dev); @@ -278,7 +252,7 @@ struct pm_ext_ops { #define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) #define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) #define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) -#define PM_EVENT_REMOTE_WAKEUP (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME) #define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) #define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) @@ -291,15 +265,15 @@ struct pm_ext_ops { #define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) #define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) #define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) -#define PMSG_USER_SUSPEND ((struct pm_messge) \ +#define PMSG_USER_SUSPEND ((struct pm_message) \ { .event = PM_EVENT_USER_SUSPEND, }) -#define PMSG_USER_RESUME ((struct pm_messge) \ +#define PMSG_USER_RESUME ((struct pm_message) \ { .event = PM_EVENT_USER_RESUME, }) -#define PMSG_REMOTE_RESUME ((struct pm_messge) \ +#define PMSG_REMOTE_RESUME ((struct pm_message) \ { .event = PM_EVENT_REMOTE_RESUME, }) -#define PMSG_AUTO_SUSPEND ((struct pm_messge) \ +#define PMSG_AUTO_SUSPEND ((struct pm_message) \ { .event = PM_EVENT_AUTO_SUSPEND, }) -#define PMSG_AUTO_RESUME ((struct pm_messge) \ +#define PMSG_AUTO_RESUME ((struct pm_message) \ { .event = PM_EVENT_AUTO_RESUME, }) /** diff --git a/include/linux/poll.h b/include/linux/poll.h index badd98ab06f..8c24ef8d997 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -46,9 +46,9 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) } struct poll_table_entry { - struct file * filp; + struct file *filp; wait_queue_t wait; - wait_queue_head_t * wait_address; + wait_queue_head_t *wait_address; }; /* @@ -56,7 +56,9 @@ struct poll_table_entry { */ struct poll_wqueues { poll_table pt; - struct poll_table_page * table; + struct poll_table_page *table; + struct task_struct *polling_task; + int triggered; int error; int inline_index; struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES]; @@ -64,6 +66,13 @@ struct poll_wqueues { extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); +extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, + ktime_t *expires, unsigned long slack); + +static inline int poll_schedule(struct poll_wqueues *pwq, int state) +{ + return poll_schedule_timeout(pwq, state, NULL, 0); +} /* * Scaleable version of the fd_set. 
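As a rough sketch of how a driver might fill in the consolidated struct dev_pm_ops introduced by the pm.h hunk above (the foo_* callbacks are hypothetical and not part of this patch; unneeded entries can simply stay NULL):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the device; interrupts are still enabled here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* undo foo_suspend() */
	return 0;
}

static int foo_suspend_noirq(struct device *dev)
{
	/* late suspend work that must run with interrupts disabled */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* early resume work, still with interrupts disabled */
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	.suspend_noirq	= foo_suspend_noirq,
	.resume_noirq	= foo_resume_noirq,
};

Only the _noirq entries run with interrupts disabled, as the comment block above describes; the ordinary callbacks keep their previous meaning.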
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index a7c72135554..4f71bf4e628 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -45,7 +45,11 @@ struct k_itimer { int it_requeue_pending; /* waiting to requeue this timer */ #define REQUEUE_PENDING 1 int it_sigev_notify; /* notify word of sigevent struct */ - struct task_struct *it_process; /* process to send signal to */ + struct signal_struct *it_signal; + union { + struct pid *it_pid; /* pid of process to send signal to */ + struct task_struct *it_process; /* for clock_nanosleep */ + }; struct sigqueue *sigq; /* signal queue entry. */ union { struct { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index f9348cba6dc..8ff25e0e7f7 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -45,6 +45,7 @@ enum { POWER_SUPPLY_HEALTH_DEAD, POWER_SUPPLY_HEALTH_OVERVOLTAGE, POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, + POWER_SUPPLY_HEALTH_COLD, }; enum { diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 22641d5d45d..98b93ca4db0 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); +extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); #define PTRACE_MODE_READ 1 #define PTRACE_MODE_ATTACH 2 /* Returns 0 on success, -errno on denial. */ @@ -313,6 +314,27 @@ static inline void user_enable_block_step(struct task_struct *task) #define arch_ptrace_stop(code, info) do { } while (0) #endif +#ifndef arch_ptrace_untrace +/* + * Do machine-specific work before untracing child. + * + * This is called for a normal detach as well as from ptrace_exit() + * when the tracing task dies. + * + * Called with write_lock(&tasklist_lock) held. + */ +#define arch_ptrace_untrace(task) do { } while (0) +#endif + +#ifndef arch_ptrace_fork +/* + * Do machine-specific work to initialize a new task. + * + * This is called from copy_process(). 
+ */ +#define arch_ptrace_fork(child, clone_flags) do { } while (0) +#endif + extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index 34a196ee794..787d19ea9f4 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h @@ -2,14 +2,12 @@ * Name : qnx4_fs.h * Author : Richard Frowijn * Function : qnx4 global filesystem definitions - * Version : 1.0.2 - * Last modified : 2000-01-31 - * * History : 23-03-1998 created */ #ifndef _LINUX_QNX4_FS_H #define _LINUX_QNX4_FS_H +#include <linux/types.h> #include <linux/qnxtypes.h> #include <linux/magic.h> diff --git a/include/linux/qnxtypes.h b/include/linux/qnxtypes.h index a3eb1137857..bebbe5cc4fb 100644 --- a/include/linux/qnxtypes.h +++ b/include/linux/qnxtypes.h @@ -2,9 +2,6 @@ * Name : qnxtypes.h * Author : Richard Frowijn * Function : standard qnx types - * Version : 1.0.2 - * Last modified : 2000-01-06 - * * History : 22-03-1998 created * */ @@ -12,6 +9,8 @@ #ifndef _QNX4TYPES_H #define _QNX4TYPES_H +#include <linux/types.h> + typedef __le16 qnx4_nxtnt_t; typedef __u8 qnx4_ftype_t; diff --git a/include/linux/quota.h b/include/linux/quota.h index 40401b55448..d72d5d84fde 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -36,17 +36,7 @@ #include <linux/errno.h> #include <linux/types.h> -#define __DQUOT_VERSION__ "dquot_6.5.1" -#define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 - -/* Size of blocks in which are counted size limits */ -#define QUOTABLOCK_BITS 10 -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) - -/* Conversion routines from and to quota blocks */ -#define qb2kb(x) ((x) << (QUOTABLOCK_BITS-10)) -#define kb2qb(x) ((x) >> (QUOTABLOCK_BITS-10)) -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +#define __DQUOT_VERSION__ "dquot_6.5.2" #define MAXQUOTAS 2 #define USRQUOTA 0 /* element used for user quotas */ @@ -80,16 +70,34 @@ #define Q_GETQUOTA 0x800007 /* get user quota structure */ #define Q_SETQUOTA 0x800008 /* set user quota structure */ +/* Quota format type IDs */ +#define QFMT_VFS_OLD 1 +#define QFMT_VFS_V0 2 + +/* Size of block in which space limits are passed through the quota + * interface */ +#define QIF_DQBLKSIZE_BITS 10 +#define QIF_DQBLKSIZE (1 << QIF_DQBLKSIZE_BITS) + /* * Quota structure used for communication with userspace via quotactl * Following flags are used to specify which fields are valid */ -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 +enum { + QIF_BLIMITS_B = 0, + QIF_SPACE_B, + QIF_ILIMITS_B, + QIF_INODES_B, + QIF_BTIME_B, + QIF_ITIME_B, +}; + +#define QIF_BLIMITS (1 << QIF_BLIMITS_B) +#define QIF_SPACE (1 << QIF_SPACE_B) +#define QIF_ILIMITS (1 << QIF_ILIMITS_B) +#define QIF_INODES (1 << QIF_INODES_B) +#define QIF_BTIME (1 << QIF_BTIME_B) +#define QIF_ITIME (1 << QIF_ITIME_B) #define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) #define QIF_USAGE (QIF_SPACE | QIF_INODES) #define QIF_TIMES (QIF_BTIME | QIF_ITIME) @@ -172,7 +180,7 @@ enum { #include <asm/atomic.h> typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ -typedef __u64 qsize_t; /* Type in which we store sizes */ +typedef long long qsize_t; /* Type in which we store sizes */ extern spinlock_t dq_data_lock; @@ -187,12 +195,12 @@ extern spinlock_t dq_data_lock; * Data for one user/group kept in memory */ struct mem_dqblk { - __u32 dqb_bhardlimit; /* 
absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ + qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ + qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ qsize_t dqb_curspace; /* current used space */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ + qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ + qsize_t dqb_isoftlimit; /* preferred inode limit */ + qsize_t dqb_curinodes; /* current # allocated inodes */ time_t dqb_btime; /* time limit for excessive disk use */ time_t dqb_itime; /* time limit for excessive inode use */ }; @@ -212,10 +220,7 @@ struct mem_dqinfo { unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; - union { - struct v1_mem_dqinfo v1_i; - struct v2_mem_dqinfo v2_i; - } u; + void *dqi_priv; }; struct super_block; @@ -249,6 +254,11 @@ extern struct dqstats dqstats; #define DQ_FAKE_B 3 /* no limits only usage */ #define DQ_READ_B 4 /* dquot was read into memory */ #define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */ +#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\ + * for the mask of entries set via SETQUOTA\ + * quotactl. They are set under dq_data_lock\ + * and the quota format handling dquot can\ + * clear them when it sees fit. */ struct dquot { struct hlist_node dq_hash; /* Hash list in memory */ @@ -287,11 +297,13 @@ struct dquot_operations { int (*initialize) (struct inode *, int); int (*drop) (struct inode *); int (*alloc_space) (struct inode *, qsize_t, int); - int (*alloc_inode) (const struct inode *, unsigned long); + int (*alloc_inode) (const struct inode *, qsize_t); int (*free_space) (struct inode *, qsize_t); - int (*free_inode) (const struct inode *, unsigned long); + int (*free_inode) (const struct inode *, qsize_t); int (*transfer) (struct inode *, struct iattr *); int (*write_dquot) (struct dquot *); /* Ordinary dquot write */ + struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */ + void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */ int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */ int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ @@ -320,12 +332,42 @@ struct quota_format_type { struct quota_format_type *qf_next; }; -#define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ -#define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ -#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but +/* Quota state flags - they actually come in two flavors - for users and groups */ +enum { + _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */ + _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */ + _DQUOT_SUSPENDED, /* User diskquotas are off, but * we have necessary info in * memory to turn them on */ -#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */ + _DQUOT_STATE_FLAGS +}; +#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED) +#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED) +#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED) +#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \ + DQUOT_SUSPENDED) +/* Other quota flags */ +#define DQUOT_QUOTA_SYS_FILE (1 << 6) /* Quota file is a special + * system file and user cannot + * touch it. 
Filesystem is + * responsible for setting + * S_NOQUOTA, S_NOATIME flags + */ +#define DQUOT_NEGATIVE_USAGE (1 << 7) /* Allow negative quota usage */ + +static inline unsigned int dquot_state_flag(unsigned int flags, int type) +{ + if (type == USRQUOTA) + return flags; + return flags << _DQUOT_STATE_FLAGS; +} + +static inline unsigned int dquot_generic_flag(unsigned int flags, int type) +{ + if (type == USRQUOTA) + return flags; + return flags >> _DQUOT_STATE_FLAGS; +} struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ diff --git a/include/linux/quotaio_v1.h b/include/linux/quotaio_v1.h deleted file mode 100644 index 746654b5de7..00000000000 --- a/include/linux/quotaio_v1.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _LINUX_QUOTAIO_V1_H -#define _LINUX_QUOTAIO_V1_H - -#include <linux/types.h> - -/* - * The following constants define the amount of time given a user - * before the soft limits are treated as hard limits (usually resulting - * in an allocation failure). The timer is started when the user crosses - * their soft limit, it is reset when they go below their soft limit. - */ -#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */ -#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */ - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is an array of these structures - * indexed by user or group number. - */ -struct v1_disk_dqblk { - __u32 dqb_bhardlimit; /* absolute limit on disk blks alloc */ - __u32 dqb_bsoftlimit; /* preferred limit on disk blks */ - __u32 dqb_curblocks; /* current block count */ - __u32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __u32 dqb_isoftlimit; /* preferred inode limit */ - __u32 dqb_curinodes; /* current # allocated inodes */ - time_t dqb_btime; /* time limit for excessive disk use */ - time_t dqb_itime; /* time limit for excessive inode use */ -}; - -#define v1_dqoff(UID) ((loff_t)((UID) * sizeof (struct v1_disk_dqblk))) - -#endif /* _LINUX_QUOTAIO_V1_H */ diff --git a/include/linux/quotaio_v2.h b/include/linux/quotaio_v2.h deleted file mode 100644 index 303d7cbe30d..00000000000 --- a/include/linux/quotaio_v2.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Definitions of structures for vfsv0 quota format - */ - -#ifndef _LINUX_QUOTAIO_V2_H -#define _LINUX_QUOTAIO_V2_H - -#include <linux/types.h> -#include <linux/quota.h> - -/* - * Definitions of magics and versions of current quota files - */ -#define V2_INITQMAGICS {\ - 0xd9c01f11, /* USRQUOTA */\ - 0xd9c01927 /* GRPQUOTA */\ -} - -#define V2_INITQVERSIONS {\ - 0, /* USRQUOTA */\ - 0 /* GRPQUOTA */\ -} - -/* - * The following structure defines the format of the disk quota file - * (as it appears on disk) - the file is a radix tree whose leaves point - * to blocks of these structures. 
- */ -struct v2_disk_dqblk { - __le32 dqb_id; /* id this quota applies to */ - __le32 dqb_ihardlimit; /* absolute limit on allocated inodes */ - __le32 dqb_isoftlimit; /* preferred inode limit */ - __le32 dqb_curinodes; /* current # allocated inodes */ - __le32 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */ - __le32 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */ - __le64 dqb_curspace; /* current space occupied (in bytes) */ - __le64 dqb_btime; /* time limit for excessive disk use */ - __le64 dqb_itime; /* time limit for excessive inode use */ -}; - -/* - * Here are header structures as written on disk and their in-memory copies - */ -/* First generic header */ -struct v2_disk_dqheader { - __le32 dqh_magic; /* Magic number identifying file */ - __le32 dqh_version; /* File version */ -}; - -/* Header with type and version specific information */ -struct v2_disk_dqinfo { - __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */ - __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */ - __le32 dqi_flags; /* Flags for quotafile (DQF_*) */ - __le32 dqi_blocks; /* Number of blocks in file */ - __le32 dqi_free_blk; /* Number of first free block in the list */ - __le32 dqi_free_entry; /* Number of block with at least one free entry */ -}; - -/* - * Structure of header of block with quota structures. It is padded to 16 bytes so - * there will be space for exactly 21 quota-entries in a block - */ -struct v2_disk_dqdbheader { - __le32 dqdh_next_free; /* Number of next block with free entry */ - __le32 dqdh_prev_free; /* Number of previous block with free entry */ - __le16 dqdh_entries; /* Number of valid entries in block */ - __le16 dqdh_pad1; - __le32 dqdh_pad2; -}; - -#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */ -#define V2_DQBLKSIZE_BITS 10 -#define V2_DQBLKSIZE (1 << V2_DQBLKSIZE_BITS) /* Size of block with quota structures */ -#define V2_DQTREEOFF 1 /* Offset of tree in file in blocks */ -#define V2_DQTREEDEPTH 4 /* Depth of quota tree */ -#define V2_DQSTRINBLK ((V2_DQBLKSIZE - sizeof(struct v2_disk_dqdbheader)) / sizeof(struct v2_disk_dqblk)) /* Number of entries in one blocks */ - -#endif /* _LINUX_QUOTAIO_V2_H */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index a558a4c1d35..21b781a3350 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -24,12 +24,21 @@ void sync_dquots(struct super_block *sb, int type); int dquot_initialize(struct inode *inode, int type); int dquot_drop(struct inode *inode); +int dquot_drop_locked(struct inode *inode); +struct dquot *dqget(struct super_block *sb, unsigned int id, int type); +void dqput(struct dquot *dquot); +int dquot_is_cached(struct super_block *sb, unsigned int id, int type); +int dquot_scan_active(struct super_block *sb, + int (*fn)(struct dquot *dquot, unsigned long priv), + unsigned long priv); +struct dquot *dquot_alloc(struct super_block *sb, int type); +void dquot_destroy(struct dquot *dquot); int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); -int dquot_alloc_inode(const struct inode *inode, unsigned long number); +int dquot_alloc_inode(const struct inode *inode, qsize_t number); int dquot_free_space(struct inode *inode, qsize_t number); -int dquot_free_inode(const struct inode *inode, unsigned long number); +int dquot_free_inode(const struct inode *inode, qsize_t number); int dquot_transfer(struct inode *inode, struct iattr *iattr); int dquot_commit(struct 
dquot *dquot); @@ -40,11 +49,14 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path, int remount); +int vfs_quota_enable(struct inode *inode, int type, int format_id, + unsigned int flags); int vfs_quota_on_path(struct super_block *sb, int type, int format_id, struct path *path); int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); int vfs_quota_off(struct super_block *sb, int type, int remount); +int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags); int vfs_quota_sync(struct super_block *sb, int type); int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); @@ -64,24 +76,22 @@ static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) * Functions for checking status of quota */ -static inline int sb_has_quota_enabled(struct super_block *sb, int type) +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) { - if (type == USRQUOTA) - return sb_dqopt(sb)->flags & DQUOT_USR_ENABLED; - return sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED; + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_USAGE_ENABLED, type); } -static inline int sb_any_quota_enabled(struct super_block *sb) +static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type) { - return sb_has_quota_enabled(sb, USRQUOTA) || - sb_has_quota_enabled(sb, GRPQUOTA); + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_LIMITS_ENABLED, type); } static inline int sb_has_quota_suspended(struct super_block *sb, int type) { - if (type == USRQUOTA) - return sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED; - return sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED; + return sb_dqopt(sb)->flags & + dquot_state_flag(DQUOT_SUSPENDED, type); } static inline int sb_any_quota_suspended(struct super_block *sb) @@ -90,6 +100,31 @@ static inline int sb_any_quota_suspended(struct super_block *sb) sb_has_quota_suspended(sb, GRPQUOTA); } +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + /* Currently if anything is on, then quota usage is on as well */ + return sb_has_quota_usage_enabled(sb, type); +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return sb_has_quota_loaded(sb, USRQUOTA) || + sb_has_quota_loaded(sb, GRPQUOTA); +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return sb_has_quota_loaded(sb, type) && + !sb_has_quota_suspended(sb, type); +} + +static inline int sb_any_quota_active(struct super_block *sb) +{ + return sb_has_quota_active(sb, USRQUOTA) || + sb_has_quota_active(sb, GRPQUOTA); +} + /* * Operations supported for diskquotas. 
*/ @@ -104,7 +139,7 @@ extern struct quotactl_ops vfs_quotactl_ops; static inline void vfs_dq_init(struct inode *inode) { BUG_ON(!inode->i_sb); - if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) + if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) inode->i_sb->dq_op->initialize(inode, -1); } @@ -112,7 +147,7 @@ static inline void vfs_dq_init(struct inode *inode) * a transaction (deadlocks possible otherwise) */ static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { /* Used space is updated in alloc_space() */ if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) return 1; @@ -132,7 +167,7 @@ static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { /* Used space is updated in alloc_space() */ if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) return 1; @@ -152,7 +187,7 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_alloc_inode(struct inode *inode) { - if (sb_any_quota_enabled(inode->i_sb)) { + if (sb_any_quota_active(inode->i_sb)) { vfs_dq_init(inode); if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) return 1; @@ -162,7 +197,7 @@ static inline int vfs_dq_alloc_inode(struct inode *inode) static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { - if (sb_any_quota_enabled(inode->i_sb)) + if (sb_any_quota_active(inode->i_sb)) inode->i_sb->dq_op->free_space(inode, nr); else inode_sub_bytes(inode, nr); @@ -176,7 +211,7 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) static inline void vfs_dq_free_inode(struct inode *inode) { - if (sb_any_quota_enabled(inode->i_sb)) + if (sb_any_quota_active(inode->i_sb)) inode->i_sb->dq_op->free_inode(inode, 1); } @@ -197,12 +232,12 @@ static inline int vfs_dq_off(struct super_block *sb, int remount) #else -static inline int sb_has_quota_enabled(struct super_block *sb, int type) +static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type) { return 0; } -static inline int sb_any_quota_enabled(struct super_block *sb) +static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type) { return 0; } @@ -217,6 +252,27 @@ static inline int sb_any_quota_suspended(struct super_block *sb) return 0; } +/* Does kernel know about any quota information for given sb + type? */ +static inline int sb_has_quota_loaded(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_loaded(struct super_block *sb) +{ + return 0; +} + +static inline int sb_has_quota_active(struct super_block *sb, int type) +{ + return 0; +} + +static inline int sb_any_quota_active(struct super_block *sb) +{ + return 0; +} + /* * NO-OP when quota not configured. 
*/ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index a916c6660df..355f6e80db0 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -136,7 +136,7 @@ do { \ */ static inline void *radix_tree_deref_slot(void **pslot) { - void *ret = *pslot; + void *ret = rcu_dereference(*pslot); if (unlikely(radix_tree_is_indirect_ptr(ret))) ret = RADIX_TREE_RETRY; return ret; diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 8fc909ef678..9743e4dbc91 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h @@ -137,6 +137,9 @@ struct mddev_s struct gendisk *gendisk; struct kobject kobj; + int hold_active; +#define UNTIL_IOCTL 1 +#define UNTIL_STOP 2 /* Superblock information */ int major_version, @@ -215,6 +218,9 @@ struct mddev_s #define MD_RECOVERY_FROZEN 9 unsigned long recovery; + int recovery_disabled; /* if we detect that recovery + * will always fail, set this + * so we don't loop trying */ int in_sync; /* know to not need resync */ struct mutex reconfig_mutex; @@ -244,6 +250,9 @@ struct mddev_s struct sysfs_dirent *sysfs_state; /* handle for 'array_state' * file in sysfs. */ + struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ + + struct work_struct del_work; /* used for delayed sysfs removal */ spinlock_t write_lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ @@ -334,17 +343,14 @@ static inline char * mdname (mddev_t * mddev) * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. */ -#define rdev_for_each_list(rdev, tmp, list) \ - \ - for ((tmp) = (list).next; \ - (rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \ - (tmp) = (tmp)->next, (tmp)->prev != &(list) \ - ; ) +#define rdev_for_each_list(rdev, tmp, head) \ + list_for_each_entry_safe(rdev, tmp, head, same_set) + /* * iterates through the 'same array disks' ringlist */ #define rdev_for_each(rdev, tmp, mddev) \ - rdev_for_each_list(rdev, tmp, (mddev)->disks) + list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) #define rdev_for_each_rcu(rdev, mddev) \ list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 8b4de4a41ff..9491026afe6 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -194,6 +194,8 @@ static inline __u64 md_event(mdp_super_t *sb) { return (ev<<32)| sb->events_lo; } +#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1) + /* * The version-1 superblock : * All numeric fields are little-endian. 
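The md_k.h hunk above rebuilds the rdev iterators on top of list_for_each_entry_safe(). A short usage sketch (drop_stale_rdevs() and the raid_disk test are only illustrative, and locking is omitted; just the iterator itself comes from this patch):

#include <linux/list.h>
#include <linux/raid/md_k.h>

static void drop_stale_rdevs(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	/* Unlinking the current 'rdev' is safe; 'tmp' holds the next
	 * element and must not be touched by the loop body. */
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->raid_disk < 0)	/* illustrative condition */
			list_del_init(&rdev->same_set);
	}
}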
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h index 1b2dda035f8..fd42aa87c39 100644 --- a/include/linux/raid/raid0.h +++ b/include/linux/raid/raid0.h @@ -5,9 +5,9 @@ struct strip_zone { - sector_t zone_offset; /* Zone offset in md_dev */ - sector_t dev_offset; /* Zone offset in real dev */ - sector_t size; /* Zone size */ + sector_t zone_start; /* Zone offset in md_dev (in sectors) */ + sector_t dev_start; /* Zone offset in real dev (in sectors) */ + sector_t sectors; /* Zone size in sectors */ int nb_dev; /* # of devices attached to the zone */ mdk_rdev_t **dev; /* Devices attached to the zone */ }; @@ -19,8 +19,8 @@ struct raid0_private_data mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ int nr_strip_zones; - sector_t hash_spacing; - int preshift; /* shift this before divide by hash_spacing */ + sector_t spacing; + int sector_shift; /* shift this before divide by spacing */ }; typedef struct raid0_private_data raid0_conf_t; diff --git a/include/linux/random.h b/include/linux/random.h index 36f125c0c60..407ea3646f8 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -8,6 +8,7 @@ #define _LINUX_RANDOM_H #include <linux/ioctl.h> +#include <linux/irqnr.h> /* ioctl()'s for the random number generator */ diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 5f89b62e698..f3f697df1d7 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -41,7 +41,7 @@ #include <linux/seqlock.h> #ifdef CONFIG_RCU_CPU_STALL_DETECTOR -#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */ #define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ @@ -59,8 +59,8 @@ struct rcu_ctrlblk { int signaled; spinlock_t lock ____cacheline_internodealigned_in_smp; - cpumask_t cpumask; /* CPUs that need to switch in order */ - /* for current batch to proceed. */ + DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ + /* current batch to proceed. */ } ____cacheline_internodealigned_in_smp; /* Is batch a before batch b ? */ diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h new file mode 100644 index 00000000000..f9ddd03961a --- /dev/null +++ b/include/linux/rculist_nulls.h @@ -0,0 +1,110 @@ +#ifndef _LINUX_RCULIST_NULLS_H +#define _LINUX_RCULIST_NULLS_H + +#ifdef __KERNEL__ + +/* + * RCU-protected list version + */ +#include <linux/list_nulls.h> +#include <linux/rcupdate.h> + +/** + * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on the node return true after this. It is + * useful for RCU based read lockfree traversal if the writer side + * must know if the list entry is still hashed or already unhashed. + * + * In particular, it means that we can not poison the forward pointers + * that may still be used for walking the hash list and we can only + * zero the pprev pointer so list_unhashed() will return true after + * this. + * + * The caller must take whatever precautions are necessary (such as + * holding appropriate locks) to avoid racing with another + * list-mutation primitive, such as hlist_nulls_add_head_rcu() or + * hlist_nulls_del_rcu(), running on this same list. 
However, it is + * perfectly legal to run concurrently with the _rcu list-traversal + * primitives, such as hlist_nulls_for_each_entry_rcu(). + */ +static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) +{ + if (!hlist_nulls_unhashed(n)) { + __hlist_nulls_del(n); + n->pprev = NULL; + } +} + +/** + * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization + * @n: the element to delete from the hash list. + * + * Note: hlist_nulls_unhashed() on entry does not return true after this, + * the entry is in an undefined state. It is useful for RCU based + * lockfree traversal. + * + * In particular, it means that we can not poison the forward + * pointers that may still be used for walking the hash list. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry(). + */ +static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + n->pprev = LIST_POISON2; +} + +/** + * hlist_nulls_add_head_rcu + * @n: the element to add to the hash list. + * @h: the list to add to. + * + * Description: + * Adds the specified element to the specified hlist_nulls, + * while permitting racing traversals. + * + * The caller must take whatever precautions are necessary + * (such as holding appropriate locks) to avoid racing + * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() + * or hlist_nulls_del_rcu(), running on this same list. + * However, it is perfectly legal to run concurrently with + * the _rcu list-traversal primitives, such as + * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency + * problems on Alpha CPUs. Regardless of the type of CPU, the + * list-traversal primitive must be guarded by rcu_read_lock(). + */ +static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + rcu_assign_pointer(h->first, n); + if (!is_a_nulls(first)) + first->pprev = &n->next; +} +/** + * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_nulls_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_nulls_node within the struct. 
+ * + */ +#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ + for (pos = rcu_dereference((head)->first); \ + (!is_a_nulls(pos)) && \ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference(pos->next)) + +#endif +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 86f1f5e43e3..921340a7b71 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -52,11 +52,15 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; -#ifdef CONFIG_CLASSIC_RCU +#if defined(CONFIG_CLASSIC_RCU) #include <linux/rcuclassic.h> -#else /* #ifdef CONFIG_CLASSIC_RCU */ +#elif defined(CONFIG_TREE_RCU) +#include <linux/rcutree.h> +#elif defined(CONFIG_PREEMPT_RCU) #include <linux/rcupreempt.h> -#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ +#else +#error "Unknown RCU implementation specified to kernel configuration" +#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ #define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT @@ -142,6 +146,7 @@ struct rcu_head { * on the write-side to insure proper synchronization. */ #define rcu_read_lock_sched() preempt_disable() +#define rcu_read_lock_sched_notrace() preempt_disable_notrace() /* * rcu_read_unlock_sched - marks the end of a RCU-classic critical section @@ -149,6 +154,7 @@ struct rcu_head { * See rcu_read_lock_sched for more information. */ #define rcu_read_unlock_sched() preempt_enable() +#define rcu_read_unlock_sched_notrace() preempt_enable_notrace() @@ -198,18 +204,6 @@ struct rcu_synchronize { extern void wakeme_after_rcu(struct rcu_head *head); -#define synchronize_rcu_xxx(name, func) \ -void name(void) \ -{ \ - struct rcu_synchronize rcu; \ - \ - init_completion(&rcu.completion); \ - /* Will wake me after RCU finished. */ \ - func(&rcu.head, wakeme_after_rcu); \ - /* Wait for it. */ \ - wait_for_completion(&rcu.completion); \ -} - /** * synchronize_sched - block until all CPUs have exited any non-preemptive * kernel code sequences. diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h new file mode 100644 index 00000000000..d4368b7975c --- /dev/null +++ b/include/linux/rcutree.h @@ -0,0 +1,329 @@ +/* + * Read-Copy Update mechanism for mutual exclusion (tree-based version) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright IBM Corporation, 2008 + * + * Author: Dipankar Sarma <dipankar@in.ibm.com> + * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm + * + * Based on the original work by Paul McKenney <paulmck@us.ibm.com> + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
+ * + * For detailed explanation of Read-Copy Update mechanism see - + * Documentation/RCU + */ + +#ifndef __LINUX_RCUTREE_H +#define __LINUX_RCUTREE_H + +#include <linux/cache.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <linux/seqlock.h> + +/* + * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. + * In theory, it should be possible to add more levels straightforwardly. + * In practice, this has not been tested, so there is probably some + * bug somewhere. + */ +#define MAX_RCU_LVLS 3 +#define RCU_FANOUT (CONFIG_RCU_FANOUT) +#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) +#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) + +#if NR_CPUS <= RCU_FANOUT +# define NUM_RCU_LVLS 1 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (NR_CPUS) +# define NUM_RCU_LVL_2 0 +# define NUM_RCU_LVL_3 0 +#elif NR_CPUS <= RCU_FANOUT_SQ +# define NUM_RCU_LVLS 2 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) +# define NUM_RCU_LVL_2 (NR_CPUS) +# define NUM_RCU_LVL_3 0 +#elif NR_CPUS <= RCU_FANOUT_CUBE +# define NUM_RCU_LVLS 3 +# define NUM_RCU_LVL_0 1 +# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) +# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) +# define NUM_RCU_LVL_3 NR_CPUS +#else +# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" +#endif /* #if (NR_CPUS) <= RCU_FANOUT */ + +#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) +#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) + +/* + * Dynticks per-CPU state. + */ +struct rcu_dynticks { + int dynticks_nesting; /* Track nesting level, sort of. */ + int dynticks; /* Even value for dynticks-idle, else odd. */ + int dynticks_nmi; /* Even value for either dynticks-idle or */ + /* not in nmi handler, else odd. So this */ + /* remains even for nmi from irq handler. */ +}; + +/* + * Definition for node within the RCU grace-period-detection hierarchy. + */ +struct rcu_node { + spinlock_t lock; + unsigned long qsmask; /* CPUs or groups that need to switch in */ + /* order for current grace period to proceed.*/ + unsigned long qsmaskinit; + /* Per-GP initialization for qsmask. */ + unsigned long grpmask; /* Mask to apply to parent qsmask. */ + int grplo; /* lowest-numbered CPU or group here. */ + int grphi; /* highest-numbered CPU or group here. */ + u8 grpnum; /* CPU/group number for next level up. */ + u8 level; /* root is at level 0. */ + struct rcu_node *parent; +} ____cacheline_internodealigned_in_smp; + +/* Index values for nxttail array in struct rcu_data. */ +#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ +#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ +#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ +#define RCU_NEXT_TAIL 3 +#define RCU_NEXT_SIZE 4 + +/* Per-CPU data for read-copy update. */ +struct rcu_data { + /* 1) quiescent-state and grace-period handling : */ + long completed; /* Track rsp->completed gp number */ + /* in order to detect GP end. */ + long gpnum; /* Highest gp number that this CPU */ + /* is aware of having started. */ + long passed_quiesc_completed; + /* Value of completed at time of qs. */ + bool passed_quiesc; /* User-mode/idle loop etc. */ + bool qs_pending; /* Core waits for quiesc state. */ + bool beenonline; /* CPU online at least once. */ + struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ + unsigned long grpmask; /* Mask to apply to leaf qsmask. 
*/ + + /* 2) batch handling */ + /* + * If nxtlist is not NULL, it is partitioned as follows. + * Any of the partitions might be empty, in which case the + * pointer to that partition will be equal to the pointer for + * the following partition. When the list is empty, all of + * the nxttail elements point to nxtlist, which is NULL. + * + * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): + * Entries that might have arrived after current GP ended + * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): + * Entries known to have arrived before current GP ended + * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): + * Entries that batch # <= ->completed - 1: waiting for current GP + * [nxtlist, *nxttail[RCU_DONE_TAIL]): + * Entries that batch # <= ->completed + * The grace period for these entries has completed, and + * the other grace-period-completed entries may be moved + * here temporarily in rcu_process_callbacks(). + */ + struct rcu_head *nxtlist; + struct rcu_head **nxttail[RCU_NEXT_SIZE]; + long qlen; /* # of queued callbacks */ + long blimit; /* Upper limit on a processed batch */ + +#ifdef CONFIG_NO_HZ + /* 3) dynticks interface. */ + struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ + int dynticks_snap; /* Per-GP tracking for dynticks. */ + int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ +#endif /* #ifdef CONFIG_NO_HZ */ + + /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ +#ifdef CONFIG_NO_HZ + unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ +#endif /* #ifdef CONFIG_NO_HZ */ + unsigned long offline_fqs; /* Kicked due to being offline. */ + unsigned long resched_ipi; /* Sent a resched IPI. */ + + /* 5) state to allow this CPU to force_quiescent_state on others */ + long n_rcu_pending; /* rcu_pending() calls since boot. */ + long n_rcu_pending_force_qs; /* when to force quiescent states. */ + + int cpu; +}; + +/* Values for signaled field in struct rcu_state. */ +#define RCU_GP_INIT 0 /* Grace period being initialized. */ +#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ +#define RCU_FORCE_QS 2 /* Need to force quiescent state. */ +#ifdef CONFIG_NO_HZ +#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK +#else /* #ifdef CONFIG_NO_HZ */ +#define RCU_SIGNAL_INIT RCU_FORCE_QS +#endif /* #else #ifdef CONFIG_NO_HZ */ + +#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR +#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ +#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ +#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ + /* to take at least one */ + /* scheduling clock irq */ + /* before ratting on them. */ + +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ + +/* + * RCU global state, including node hierarchy. This hierarchy is + * represented in "heap" form in a dense array. The root (first level) + * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second + * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), + * and the third level in ->node[m+1] and following (->node[m+1] referenced + * by ->level[2]). The number of levels is determined by the number of + * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" + * consisting of a single rcu_node. + */ +struct rcu_state { + struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ + struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. 
*/ + u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ + u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ + struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ + + /* The following fields are guarded by the root rcu_node's lock. */ + + u8 signaled ____cacheline_internodealigned_in_smp; + /* Force QS state. */ + long gpnum; /* Current gp number. */ + long completed; /* # of last completed gp. */ + spinlock_t onofflock; /* exclude on/offline and */ + /* starting new GP. */ + spinlock_t fqslock; /* Only one task forcing */ + /* quiescent states. */ + unsigned long jiffies_force_qs; /* Time at which to invoke */ + /* force_quiescent_state(). */ + unsigned long n_force_qs; /* Number of calls to */ + /* force_quiescent_state(). */ + unsigned long n_force_qs_lh; /* ~Number of calls leaving */ + /* due to lock unavailable. */ + unsigned long n_force_qs_ngp; /* Number of calls leaving */ + /* due to no GP active. */ +#ifdef CONFIG_RCU_CPU_STALL_DETECTOR + unsigned long gp_start; /* Time at which GP started, */ + /* but in jiffies. */ + unsigned long jiffies_stall; /* Time at which to check */ + /* for CPU stalls. */ +#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ +#ifdef CONFIG_NO_HZ + long dynticks_completed; /* Value of completed @ snap. */ +#endif /* #ifdef CONFIG_NO_HZ */ +}; + +extern struct rcu_state rcu_state; +DECLARE_PER_CPU(struct rcu_data, rcu_data); + +extern struct rcu_state rcu_bh_state; +DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); + +/* + * Increment the quiescent state counter. + * The counter is a bit degenerated: We do not need to know + * how many quiescent states passed, just if there was at least + * one since the start of the grace period. Thus just a flag. + */ +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + rdp->passed_quiesc = 1; + rdp->passed_quiesc_completed = rdp->completed; +} +static inline void rcu_bh_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); + rdp->passed_quiesc = 1; + rdp->passed_quiesc_completed = rdp->completed; +} + +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() \ + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#else +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +#endif + +static inline void __rcu_read_lock(void) +{ + preempt_disable(); + __acquire(RCU); + rcu_read_acquire(); +} +static inline void __rcu_read_unlock(void) +{ + rcu_read_release(); + __release(RCU); + preempt_enable(); +} +static inline void __rcu_read_lock_bh(void) +{ + local_bh_disable(); + __acquire(RCU_BH); + rcu_read_acquire(); +} +static inline void __rcu_read_unlock_bh(void) +{ + rcu_read_release(); + __release(RCU_BH); + local_bh_enable(); +} + +#define __synchronize_sched() synchronize_rcu() + +#define call_rcu_sched(head, func) call_rcu(head, func) + +static inline void rcu_init_sched(void) +{ +} + +extern void __rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); + +extern long rcu_batches_completed(void); +extern long rcu_batches_completed_bh(void); + +#ifdef CONFIG_NO_HZ +void rcu_enter_nohz(void); +void rcu_exit_nohz(void); +#else /* CONFIG_NO_HZ */ +static inline void rcu_enter_nohz(void) +{ +} +static inline void rcu_exit_nohz(void) +{ +} +#endif /* 
CONFIG_NO_HZ */ + +#endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index afdc4558bb9..801bf77ff4e 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -104,10 +104,10 @@ struct regulator; /** * struct regulator_bulk_data - Data used for bulk regulator operations. * - * @supply The name of the supply. Initialised by the user before - * using the bulk regulator APIs. - * @consumer The regulator consumer for the supply. This will be managed - * by the bulk API. + * @supply: The name of the supply. Initialised by the user before + * using the bulk regulator APIs. + * @consumer: The regulator consumer for the supply. This will be managed + * by the bulk API. * * The regulator APIs provide a series of regulator_bulk_() API calls as * a convenience to consumers which require multiple supplies. This diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index e37d8056198..2dae05705f1 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -24,7 +24,33 @@ struct regulator_init_data; /** * struct regulator_ops - regulator operations. * - * This struct describes regulator operations. + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. + * + * @enable: Enable the regulator. + * @disable: Disable the regulator. + * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. + * + * @set_voltage: Set the voltage for the regulator within the range specified. + * The driver should select the voltage closest to min_uV. + * @get_voltage: Return the currently configured voltage for the regulator. + * + * @set_current_limit: Configure a limit for a current-limited regulator. + * @get_current_limit: Get the limit for a current-limited regulator. + * + * @set_mode: Set the operating mode for the regulator. + * @get_mode: Get the current operating mode for the regulator. + * @get_optimum_mode: Get the most efficient operating mode for the regulator + * when running with the specified parameters. + * + * @set_suspend_voltage: Set the voltage for the regulator when the system + * is suspended. + * @set_suspend_enable: Mark the regulator as enabled when the system is + * suspended. + * @set_suspend_disable: Mark the regulator as disabled when the system is + * suspended. + * @set_suspend_mode: Set the operating mode for the regulator when the + * system is suspended. */ struct regulator_ops { @@ -75,6 +101,15 @@ enum regulator_type { /** * struct regulator_desc - Regulator descriptor * + * Each regulator registered with the core is described with a structure of + * this type. + * + * @name: Identifying name for the regulator. + * @id: Numerical identifier for the regulator. + * @ops: Regulator operations table. + * @irq: Interrupt number for the regulator. + * @type: Indicates if the regulator is a voltage or current regulator. + * @owner: Module providing the regulator, used for refcounting. */ struct regulator_desc { const char *name; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index c6d69331a81..3794773b23d 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -44,6 +44,10 @@ struct regulator; * struct regulator_state - regulator state during low power syatem states * * This describes a regulators state during a system wide low power state. + * + * @uV: Operating voltage during suspend. 
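To make the regulator_ops/regulator_desc documentation above concrete, here is a hedged sketch of how a simple voltage-regulator driver might fill the two structures in; the fixed3v3_* names are hypothetical and the callback bodies are stubs, only the field names and callback signatures come from regulator/driver.h:

static int fixed3v3_enable(struct regulator_dev *rdev)      { /* set enable bit */ return 0; }
static int fixed3v3_disable(struct regulator_dev *rdev)     { /* clear enable bit */ return 0; }
static int fixed3v3_is_enabled(struct regulator_dev *rdev)  { return 1; }
static int fixed3v3_get_voltage(struct regulator_dev *rdev) { return 3300000; /* uV */ }

static struct regulator_ops fixed3v3_ops = {
	.enable		= fixed3v3_enable,
	.disable	= fixed3v3_disable,
	.is_enabled	= fixed3v3_is_enabled,
	.get_voltage	= fixed3v3_get_voltage,
};

static struct regulator_desc fixed3v3_desc = {
	.name	= "fixed-3v3",
	.id	= 0,
	.ops	= &fixed3v3_ops,
	.type	= REGULATOR_VOLTAGE,
	.owner	= THIS_MODULE,
};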
+ * @mode: Operating mode during suspend. + * @enabled: Enabled during suspend. */ struct regulator_state { int uV; /* suspend voltage */ @@ -55,6 +59,30 @@ struct regulator_state { * struct regulation_constraints - regulator operating constraints. * * This struct describes regulator and board/machine specific constraints. + * + * @name: Descriptive name for the constraints, used for display purposes. + * + * @min_uV: Smallest voltage consumers may set. + * @max_uV: Largest voltage consumers may set. + * + * @min_uA: Smallest consumers consumers may set. + * @max_uA: Largest current consumers may set. + * + * @valid_modes_mask: Mask of modes which may be configured by consumers. + * @valid_ops_mask: Operations which may be performed by consumers. + * + * @always_on: Set if the regulator should never be disabled. + * @boot_on: Set if the regulator is enabled when the system is initially + * started. + * @apply_uV: Apply the voltage constraint when initialising. + * + * @input_uV: Input voltage for regulator when supplied by another regulator. + * + * @state_disk: State for regulator when system is suspended in disk mode. + * @state_mem: State for regulator when system is suspended in mem mode. + * @state_standby: State for regulator when system is suspended in standby + * mode. + * @initial_state: Suspend state to set by default. */ struct regulation_constraints { @@ -93,6 +121,9 @@ struct regulation_constraints { * struct regulator_consumer_supply - supply -> device mapping * * This maps a supply name to a device. + * + * @dev: Device structure for the consumer. + * @supply: Name for the supply. */ struct regulator_consumer_supply { struct device *dev; /* consumer */ @@ -103,6 +134,16 @@ struct regulator_consumer_supply { * struct regulator_init_data - regulator platform initialisation data. * * Initialisation constraints, our supply and consumers supplies. + * + * @supply_regulator_dev: Parent regulator (if any). + * + * @constraints: Constraints. These must be specified for the regulator to + * be usable. + * @num_consumer_supplies: Number of consumer device supplies. + * @consumer_supplies: Consumer device supply configuration. + * + * @regulator_init: Callback invoked when the regulator has been registered. + * @driver_data: Data passed to regulator_init. */ struct regulator_init_data { struct device *supply_regulator_dev; /* or NULL for LINE */ diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 271c1c2c9f6..dede0a2cfc4 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -43,6 +43,10 @@ struct res_counter { * the routines below consider this to be IRQ-safe */ spinlock_t lock; + /* + * Parent counter, used for hierarchial resource accounting + */ + struct res_counter *parent; }; /** @@ -87,7 +91,7 @@ enum { * helpers for accounting */ -void res_counter_init(struct res_counter *counter); +void res_counter_init(struct res_counter *counter, struct res_counter *parent); /* * charge - try to consume more resource. 
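On the machine side, the constraints and consumer-supply structures documented above are typically filled in by a board file. A sketch under the assumption of a hypothetical mmc_dev platform device fed by a "vmmc" supply; the field names follow regulator/machine.h, the values are made up:

static struct regulator_consumer_supply vmmc_consumers[] = {
	{ .dev = &mmc_dev.dev, .supply = "vmmc" },	/* hypothetical consumer device */
};

static struct regulator_init_data vmmc_init_data = {
	.constraints = {
		.name		= "VMMC",
		.min_uV		= 1800000,
		.max_uV		= 3300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
		.boot_on	= 1,
	},
	.num_consumer_supplies	= ARRAY_SIZE(vmmc_consumers),
	.consumer_supplies	= vmmc_consumers,
};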
@@ -103,7 +107,7 @@ void res_counter_init(struct res_counter *counter); int __must_check res_counter_charge_locked(struct res_counter *counter, unsigned long val); int __must_check res_counter_charge(struct res_counter *counter, - unsigned long val); + unsigned long val, struct res_counter **limit_fail_at); /* * uncharge - tell that some portion of the resource is released diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 4cd64b0d982..164332cbb77 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -108,6 +108,7 @@ struct rfkill { struct device dev; struct list_head node; + enum rfkill_state state_for_resume; }; #define to_rfkill(d) container_of(d, struct rfkill, dev) @@ -148,11 +149,4 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill) #endif } -/* rfkill notification chain */ -#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill - switch has changed */ - -int register_rfkill_notifier(struct notifier_block *nb); -int unregister_rfkill_notifier(struct notifier_block *nb); - #endif /* RFKILL_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e097c2e6b6d..b3b35966008 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -28,17 +28,19 @@ struct ring_buffer_event { * size = 8 bytes * * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock - * array[0] = tv_nsec - * array[1] = tv_sec + * array[0] = tv_nsec + * array[1..2] = tv_sec * size = 16 bytes * * @RINGBUF_TYPE_DATA: Data record * If len is zero: * array[0] holds the actual length - * array[1..(length+3)/4-1] holds data + * array[1..(length+3)/4] holds data + * size = 4 + 4 + length (bytes) * else * length = len << 2 - * array[0..(length+3)/4] holds data + * array[0..(length+3)/4-1] holds data + * size = 4 + length (bytes) */ enum ring_buffer_type { RINGBUF_TYPE_PADDING, @@ -116,12 +118,20 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_entries(struct ring_buffer *buffer); unsigned long ring_buffer_overruns(struct ring_buffer *buffer); +unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); u64 ring_buffer_time_stamp(int cpu); void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); void tracing_on(void); void tracing_off(void); +void tracing_off_permanent(void); + +void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); +void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); +int ring_buffer_read_page(struct ring_buffer *buffer, + void **data_page, int cpu, int full); enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index 90987b7bcc1..c93a58a4003 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h @@ -391,7 +391,6 @@ static inline int rio_add_inb_buffer(struct rio_mport *mport, int mbox, * rio_get_inb_message - Get A RIO message from an inbound mailbox queue * @mport: Master port containing the inbound mailbox * @mbox: The inbound mailbox number - * @buffer: Pointer to the message buffer * * Get a RIO message from an inbound mailbox queue. Returns 0 on success. */ @@ -427,9 +426,9 @@ void rio_dev_put(struct rio_dev *); * Get the unique RIO device identifier. Returns the device * identifier string. 
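The res_counter hunks above add a parent pointer and make charging report which level of the hierarchy refused the charge. A rough sketch of how a controller might wire a child counter to its parent and charge through it, using only the prototypes shown in this diff; the function names are made up:

static struct res_counter parent_cnt, child_cnt;

static void counters_setup(void)
{
	res_counter_init(&parent_cnt, NULL);		/* root of the hierarchy */
	res_counter_init(&child_cnt, &parent_cnt);	/* charges propagate upward */
}

static int charge_one_page(void)
{
	struct res_counter *fail_at;
	int ret;

	ret = res_counter_charge(&child_cnt, PAGE_SIZE, &fail_at);
	if (ret)
		return ret;	/* fail_at identifies the counter whose limit was hit */
	return 0;
}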
*/ -static inline char *rio_name(struct rio_dev *rdev) +static inline const char *rio_name(struct rio_dev *rdev) { - return rdev->dev.bus_id; + return dev_name(&rdev->dev); } /** diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 89f0564b10c..b35bc0e19cd 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -63,16 +63,13 @@ void anon_vma_unlink(struct vm_area_struct *); void anon_vma_link(struct vm_area_struct *); void __anon_vma_link(struct vm_area_struct *); -extern struct anon_vma *page_lock_anon_vma(struct page *page); -extern void page_unlock_anon_vma(struct anon_vma *anon_vma); - /* * rmap interfaces called when adding or removing pte of page */ void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_file_rmap(struct page *); -void page_remove_rmap(struct page *, struct vm_area_struct *); +void page_remove_rmap(struct page *); #ifdef CONFIG_DEBUG_VM void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 91f597ad6ac..4046b75563c 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -145,6 +145,8 @@ struct rtc_class_ops { int (*irq_set_state)(struct device *, int enabled); int (*irq_set_freq)(struct device *, int freq); int (*read_callback)(struct device *, int data); + int (*alarm_irq_enable)(struct device *, unsigned int enabled); + int (*update_irq_enable)(struct device *, unsigned int enabled); }; #define RTC_DEVICE_NAME_SIZE 20 @@ -181,7 +183,7 @@ struct rtc_device struct timer_list uie_timer; /* Those fields are protected by rtc->irq_lock */ unsigned int oldsecs; - unsigned int irq_active:1; + unsigned int uie_irq_active:1; unsigned int stop_uie_polling:1; unsigned int uie_task_active:1; unsigned int uie_timer_active:1; @@ -216,6 +218,10 @@ extern int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled); extern int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq); +extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); +extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, + unsigned int enabled); typedef struct rtc_task { void (*func)(void *private_data); diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2b3d51c6ec9..e88f7058b3a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -107,6 +107,11 @@ enum { RTM_GETADDRLABEL, #define RTM_GETADDRLABEL RTM_GETADDRLABEL + RTM_GETDCB = 78, +#define RTM_GETDCB RTM_GETDCB + RTM_SETDCB, +#define RTM_SETDCB RTM_SETDCB + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 55e30d11447..4cae9b81a1f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -96,6 +96,7 @@ struct exec_domain; struct futex_pi_state; struct robust_list_head; struct bio; +struct bts_tracer; /* * List of flags we want to share for kernel threads, @@ -249,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(void); extern void task_rq_unlock_wait(struct task_struct *p); -extern cpumask_t nohz_cpu_mask; +extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); #else @@ -259,8 +260,6 @@ static inline int 
select_nohz_load_balancer(int cpu) } #endif -extern unsigned long rt_needs_cpu(int cpu); - /* * Only dump TASK_* tasks. (0 for all tasks) */ @@ -285,7 +284,6 @@ long io_schedule_timeout(long timeout); extern void cpu_init (void); extern void trap_init(void); -extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); @@ -388,6 +386,9 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); (mm)->hiwater_vm = (mm)->total_vm; \ } while (0) +#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) +#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) + extern void set_dumpable(struct mm_struct *mm, int value); extern int get_dumpable(struct mm_struct *mm); @@ -572,12 +573,6 @@ struct signal_struct { */ struct rlimit rlim[RLIM_NLIMITS]; - /* keep the process-shared keyrings here so that they do the right - * thing in threads created with CLONE_THREAD */ -#ifdef CONFIG_KEYS - struct key *session_keyring; /* keyring inherited over fork */ - struct key *process_keyring; /* keyring private to this process */ -#endif #ifdef CONFIG_BSD_PROCESS_ACCT struct pacct_struct pacct; /* per-process accounting information */ #endif @@ -648,6 +643,7 @@ struct user_struct { /* Hash table maintenance information */ struct hlist_node uidhash_node; uid_t uid; + struct user_namespace *user_ns; #ifdef CONFIG_USER_SCHED struct task_group *tg; @@ -665,6 +661,7 @@ extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) + struct backing_dev_info; struct reclaim_state; @@ -672,8 +669,7 @@ struct reclaim_state; struct sched_info { /* cumulative counters */ unsigned long pcount; /* # of times run on this cpu */ - unsigned long long cpu_time, /* time spent on the cpu */ - run_delay; /* time spent waiting on a runqueue */ + unsigned long long run_delay; /* time spent waiting on a runqueue */ /* timestamps */ unsigned long long last_arrival,/* when we last ran on a cpu */ @@ -764,20 +760,51 @@ enum cpu_idle_type { #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ -#define BALANCE_FOR_MC_POWER \ - (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) +enum powersavings_balance_level { + POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ + POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package + * first for long running threads + */ + POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle + * cpu package for power savings + */ + MAX_POWERSAVINGS_BALANCE_LEVELS +}; + +extern int sched_mc_power_savings, sched_smt_power_savings; + +static inline int sd_balance_for_mc_power(void) +{ + if (sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; -#define BALANCE_FOR_PKG_POWER \ - ((sched_mc_power_savings || sched_smt_power_savings) ? \ - SD_POWERSAVINGS_BALANCE : 0) + return 0; +} -#define test_sd_parent(sd, flag) ((sd->parent && \ - (sd->parent->flags & flag)) ? 1 : 0) +static inline int sd_balance_for_package_power(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_POWERSAVINGS_BALANCE; + return 0; +} + +/* + * Optimise SD flags for power savings: + * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. 
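The helpers above are meant to be OR-ed into a sched_domain's flags when a domain is built; a hedged, purely illustrative sketch of how a multi-core domain initializer might compose them (the real initializers live in the per-arch topology headers, and the exact base flag set is an assumption here):

static inline unsigned int mc_domain_flags(void)
{
	return SD_SHARE_PKG_RESOURCES			/* cores share package resources */
	     | SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE
	     | sd_balance_for_mc_power()		/* SD_POWERSAVINGS_BALANCE if enabled */
	     | sd_power_saving_flags();			/* SD_BALANCE_NEWIDLE if enabled */
}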
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0 + */ + +static inline int sd_power_saving_flags(void) +{ + if (sched_mc_power_savings | sched_smt_power_savings) + return SD_BALANCE_NEWIDLE; + + return 0; +} struct sched_group { struct sched_group *next; /* Must be a circular list */ - cpumask_t cpumask; /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a @@ -790,8 +817,15 @@ struct sched_group { * (see include/linux/reciprocal_div.h) */ u32 reciprocal_cpu_power; + + unsigned long cpumask[]; }; +static inline struct cpumask *sched_group_cpus(struct sched_group *sg) +{ + return to_cpumask(sg->cpumask); +} + enum sched_domain_level { SD_LV_NONE = 0, SD_LV_SIBLING, @@ -815,7 +849,6 @@ struct sched_domain { struct sched_domain *parent; /* top domain must be null terminated */ struct sched_domain *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ - cpumask_t span; /* span of all CPUs in this domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ @@ -870,56 +903,41 @@ struct sched_domain { #ifdef CONFIG_SCHED_DEBUG char *name; #endif + + /* span of all CPUs in this domain */ + unsigned long span[]; }; -extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, +static inline struct cpumask *sched_domain_span(struct sched_domain *sd) +{ + return to_cpumask(sd->span); +} + +extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, struct sched_domain_attr *dattr_new); -extern int arch_reinit_sched_domains(void); + +/* Test a flag in parent sched domain */ +static inline int test_sd_parent(struct sched_domain *sd, int flag) +{ + if (sd->parent && (sd->parent->flags & flag)) + return 1; + + return 0; +} #else /* CONFIG_SMP */ struct sched_domain_attr; static inline void -partition_sched_domains(int ndoms_new, cpumask_t *doms_new, +partition_sched_domains(int ndoms_new, struct cpumask *doms_new, struct sched_domain_attr *dattr_new) { } #endif /* !CONFIG_SMP */ struct io_context; /* See blkdev.h */ -#define NGROUPS_SMALL 32 -#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) -struct group_info { - int ngroups; - atomic_t usage; - gid_t small_block[NGROUPS_SMALL]; - int nblocks; - gid_t *blocks[0]; -}; -/* - * get_group_info() must be called with the owning task locked (via task_lock()) - * when task != current. The reason being that the vast majority of callers are - * looking at current->group_info, which can not be changed except by the - * current task. Changing current->group_info requires the task lock, too. 
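With the CPU masks now trailing variable-length arrays, callers reach them only through sched_group_cpus() and sched_domain_span(). A small sketch of the resulting iteration idiom (for_each_cpu() and cpumask_weight() come from linux/cpumask.h; the function itself is illustrative):

static void walk_domain(struct sched_domain *sd)
{
	struct sched_group *group = sd->groups;
	int cpu;

	do {
		for_each_cpu(cpu, sched_group_cpus(group))
			pr_debug("group cpu %d\n", cpu);
		group = group->next;		/* groups form a circular list */
	} while (group != sd->groups);

	pr_debug("domain spans %u CPUs\n",
		 cpumask_weight(sched_domain_span(sd)));
}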
- */ -#define get_group_info(group_info) do { \ - atomic_inc(&(group_info)->usage); \ -} while (0) - -#define put_group_info(group_info) do { \ - if (atomic_dec_and_test(&(group_info)->usage)) \ - groups_free(group_info); \ -} while (0) - -extern struct group_info *groups_alloc(int gidsetsize); -extern void groups_free(struct group_info *group_info); -extern int set_current_groups(struct group_info *group_info); -extern int groups_search(struct group_info *group_info, gid_t grp); -/* access the groups "array" with this macro */ -#define GROUP_AT(gi, i) \ - ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK extern void prefetch_stack(struct task_struct *t); @@ -963,7 +981,7 @@ struct sched_class { void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); void (*set_cpus_allowed)(struct task_struct *p, - const cpumask_t *newmask); + const struct cpumask *newmask); void (*rq_online)(struct rq *rq); void (*rq_offline)(struct rq *rq); @@ -1165,6 +1183,19 @@ struct task_struct { struct list_head ptraced; struct list_head ptrace_entry; +#ifdef CONFIG_X86_PTRACE_BTS + /* + * This is the tracer handle for the ptrace BTS extension. + * This field actually belongs to the ptracer task. + */ + struct bts_tracer *bts; + /* + * The buffer to hold the BTS data. + */ + void *bts_buffer; + size_t bts_size; +#endif /* CONFIG_X86_PTRACE_BTS */ + /* PID/PID hash table linkage. */ struct pid_link pids[PIDTYPE_MAX]; struct list_head thread_group; @@ -1186,17 +1217,12 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ - uid_t uid,euid,suid,fsuid; - gid_t gid,egid,sgid,fsgid; - struct group_info *group_info; - kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; - struct user_struct *user; - unsigned securebits; -#ifdef CONFIG_KEYS - unsigned char jit_keyring; /* default keyring to attach requested keys to */ - struct key *request_key_auth; /* assumed request_key authority */ - struct key *thread_keyring; /* keyring private to this thread */ -#endif + const struct cred *real_cred; /* objective and real subjective task + * credentials (COW) */ + const struct cred *cred; /* effective (overridable) subjective task + * credentials (COW) */ + struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ + char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) @@ -1233,9 +1259,6 @@ struct task_struct { int (*notifier)(void *priv); void *notifier_data; sigset_t *notifier_mask; -#ifdef CONFIG_SECURITY - void *security; -#endif struct audit_context *audit_context; #ifdef CONFIG_AUDITSYSCALL uid_t loginuid; @@ -1356,6 +1379,23 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored adress in ret_stack */ + int curr_ret_stack; + /* Stack of return addresses for return function tracing */ + struct ftrace_ret_stack *ret_stack; + /* + * Number of functions that haven't been traced + * because of depth overrun. 
+ */ + atomic_t trace_overrun; + /* Pause for the tracing */ + atomic_t tracing_graph_pause; +#endif +#ifdef CONFIG_TRACING + /* state flags for use by tracers */ + unsigned long trace; +#endif }; /* @@ -1594,12 +1634,12 @@ extern cputime_t task_gtime(struct task_struct *p); #ifdef CONFIG_SMP extern int set_cpus_allowed_ptr(struct task_struct *p, - const cpumask_t *new_mask); + const struct cpumask *new_mask); #else static inline int set_cpus_allowed_ptr(struct task_struct *p, - const cpumask_t *new_mask) + const struct cpumask *new_mask) { - if (!cpu_isset(0, *new_mask)) + if (!cpumask_test_cpu(0, new_mask)) return -EINVAL; return 0; } @@ -1666,16 +1706,16 @@ extern void wake_up_idle_cpu(int cpu); static inline void wake_up_idle_cpu(int cpu) { } #endif -#ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; +extern unsigned int sysctl_sched_shares_ratelimit; +extern unsigned int sysctl_sched_shares_thresh; +#ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; -extern unsigned int sysctl_sched_shares_ratelimit; -extern unsigned int sysctl_sched_shares_thresh; int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, @@ -1775,7 +1815,6 @@ static inline struct user_struct *get_uid(struct user_struct *u) return u; } extern void free_uid(struct user_struct *); -extern void switch_uid(struct user_struct *); extern void release_uids(struct user_namespace *ns); #include <asm/current.h> @@ -1794,9 +1833,6 @@ extern void wake_up_new_task(struct task_struct *tsk, extern void sched_fork(struct task_struct *p, int clone_flags); extern void sched_dead(struct task_struct *p); -extern int in_group_p(gid_t); -extern int in_egroup_p(gid_t); - extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); @@ -1928,6 +1964,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) +extern bool is_single_threaded(struct task_struct *); + /* * Careful: do_each_thread/while_each_thread is a double loop so * 'break' will not work as expected - use goto instead. @@ -2212,10 +2250,8 @@ __trace_special(void *__tr, void *__data, } #endif -extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); -extern long sched_getaffinity(pid_t pid, cpumask_t *mask); - -extern int sched_mc_power_savings, sched_smt_power_savings; +extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); +extern long sched_getaffinity(pid_t pid, struct cpumask *mask); extern void normalize_rt_tasks(void); @@ -2224,6 +2260,7 @@ extern void normalize_rt_tasks(void); extern struct task_group init_task_group; #ifdef CONFIG_USER_SCHED extern struct task_group root_task_group; +extern void set_tg_uid(struct user_struct *user); #endif extern struct task_group *sched_create_group(struct task_group *parent); diff --git a/include/linux/securebits.h b/include/linux/securebits.h index 92f09bdf117..d2c5ed845bc 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h @@ -32,7 +32,7 @@ setting is locked or not. A setting which is locked cannot be changed from user-level. 
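Several hunks above replace by-value cpumask_t arguments with struct cpumask pointers and cpumask_var_t. A sketch of the calling convention that pairs with the new set_cpus_allowed_ptr() prototype (the helpers are from linux/cpumask.h; error handling is kept minimal):

static int pin_task_to_cpu(struct task_struct *p, int cpu)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))	/* heap-backed only with CONFIG_CPUMASK_OFFSTACK */
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	ret = set_cpus_allowed_ptr(p, mask);		/* now takes const struct cpumask * */

	free_cpumask_var(mask);
	return ret;
}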
*/ #define issecure_mask(X) (1 << (X)) -#define issecure(X) (issecure_mask(X) & current->securebits) +#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ issecure_mask(SECURE_NO_SETUID_FIXUP) | \ diff --git a/include/linux/security.h b/include/linux/security.h index e3d4ecda267..1f2ab6353c0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -37,6 +37,10 @@ /* Maximum number of letters for an LSM name string */ #define SECURITY_NAME_MAX 10 +/* If capable should audit the security request */ +#define SECURITY_CAP_NOAUDIT 0 +#define SECURITY_CAP_AUDIT 1 + struct ctl_table; struct audit_krule; @@ -44,25 +48,26 @@ struct audit_krule; * These functions are in security/capability.c and are used * as the default capabilities functions */ -extern int cap_capable(struct task_struct *tsk, int cap); +extern int cap_capable(struct task_struct *tsk, const struct cred *cred, + int cap, int audit); extern int cap_settime(struct timespec *ts, struct timezone *tz); extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_bprm_set_security(struct linux_binprm *bprm); -extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); +extern int cap_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +extern int cap_bprm_set_creds(struct linux_binprm *bprm); extern int cap_bprm_secureexec(struct linux_binprm *bprm); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern int cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); -extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); -extern void cap_task_reparent_to_init(struct task_struct *p); +extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p); + unsigned long arg4, unsigned long arg5); extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); extern int cap_task_setioprio(struct task_struct *p, int ioprio); extern int cap_task_setnice(struct task_struct *p, int nice); @@ -105,7 +110,7 @@ extern unsigned long mmap_min_addr; struct sched_param; struct request_sock; -/* bprm_apply_creds unsafe reasons */ +/* bprm->unsafe reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 #define LSM_UNSAFE_PTRACE_CAP 4 @@ -149,36 +154,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * * Security hooks for program execution operations. * - * @bprm_alloc_security: - * Allocate and attach a security structure to the @bprm->security field. - * The security field is initialized to NULL when the bprm structure is - * allocated. 
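The new audit argument lets callers probe for a capability without emitting an audit record on denial. A short sketch using the wrappers declared later in this header: security_real_capable_noaudit() for a quiet query on another task, security_capable() for the normal audited check on current; CAP_SYS_ADMIN is just an example capability:

static bool task_may_admin(struct task_struct *tsk)
{
	/* quiet probe: no audit record if the capability is missing */
	return security_real_capable_noaudit(tsk, CAP_SYS_ADMIN) == 0;
}

static int do_privileged_op(void)
{
	if (security_capable(CAP_SYS_ADMIN) != 0)	/* audited on denial */
		return -EPERM;
	/* ... perform the privileged operation ... */
	return 0;
}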
- * @bprm contains the linux_binprm structure to be modified. - * Return 0 if operation was successful. - * @bprm_free_security: - * @bprm contains the linux_binprm structure to be modified. - * Deallocate and clear the @bprm->security field. - * @bprm_apply_creds: - * Compute and set the security attributes of a process being transformed - * by an execve operation based on the old attributes (current->security) - * and the information saved in @bprm->security by the set_security hook. - * Since this hook function (and its caller) are void, this hook can not - * return an error. However, it can leave the security attributes of the - * process unchanged if an access failure occurs at this point. - * bprm_apply_creds is called under task_lock. @unsafe indicates various - * reasons why it may be unsafe to change security state. - * @bprm contains the linux_binprm structure. - * @bprm_post_apply_creds: - * Runs after bprm_apply_creds with the task_lock dropped, so that - * functions which cannot be called safely under the task_lock can - * be used. This hook is a good place to perform state changes on - * the process such as closing open file descriptors to which access - * is no longer granted if the attributes were changed. - * Note that a security module might need to save state between - * bprm_apply_creds and bprm_post_apply_creds to store the decision - * on whether the process may proceed. - * @bprm contains the linux_binprm structure. - * @bprm_set_security: + * @bprm_set_creds: * Save security information in the bprm->security field, typically based * on information about the bprm->file, for later use by the apply_creds * hook. This hook may also optionally check permissions (e.g. for @@ -191,15 +167,30 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: - * This hook mediates the point when a search for a binary handler will - * begin. It allows a check the @bprm->security value which is set in - * the preceding set_security call. The primary difference from - * set_security is that the argv list and envp list are reliably - * available in @bprm. This hook may be called multiple times - * during a single execve; and in each pass set_security is called - * first. + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check the @bprm->security value which is set in the + * preceding set_creds call. The primary difference from set_creds is + * that the argv list and envp list are reliably available in @bprm. This + * hook may be called multiple times during a single execve; and in each + * pass set_creds is called first. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. + * @bprm_committing_creds: + * Prepare to install the new security attributes of a process being + * transformed by an execve operation, based on the old credentials + * pointed to by @current->cred and the information set in @bprm->cred by + * the bprm_set_creds hook. @bprm points to the linux_binprm structure. + * This hook is a good place to perform state changes on the process such + * as closing open file descriptors to which access will no longer be + * granted when the attributes are changed. This is called immediately + * before commit_creds(). 
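For orientation, the exec-time flow described above now works on bprm->cred rather than a separate bprm->security blob. A hedged sketch of an LSM's bprm_set_creds handler that keeps the common capability behaviour and then tweaks the prepared credentials (the example_* name is hypothetical):

static int example_bprm_set_creds(struct linux_binprm *bprm)
{
	int ret = cap_bprm_set_creds(bprm);	/* keep commoncap's handling */

	if (ret)
		return ret;

	/* bprm->cred is the credential set prepared for the new image;
	 * an LSM may adjust it here before it is committed. */
	return 0;
}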
+ * @bprm_committed_creds: + * Tidy up after the installation of the new security attributes of a + * process being transformed by an execve operation. The new credentials + * have, by this point, been set to @current->cred. @bprm points to the + * linux_binprm structure. This hook is a good place to perform state + * changes on the process such as clearing out non-inheritable signal + * state. This is called immediately after commit_creds(). * @bprm_secureexec: * Return a boolean value (0 or 1) indicating whether a "secure exec" * is required. The flag is passed in the auxiliary table @@ -345,17 +336,37 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dir contains the inode structure of the parent directory of the new link. * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. + * @path_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link + * to the file. + * @new_dir contains the path structure of the parent directory of + * the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. * @inode_unlink: * Check the permission to remove a hard link to a file. * @dir contains the inode structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. + * @path_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the path structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. * @inode_symlink: * Check the permission to create a symbolic link to a file. * @dir contains the inode structure of parent directory of the symbolic link. * @dentry contains the dentry structure of the symbolic link. * @old_name contains the pathname of file. * Return 0 if permission is granted. + * @path_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the path structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory * associated with inode strcture @dir. @@ -363,11 +374,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. * Return 0 if permission is granted. + * @path_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with path strcture @path. + * @dir containst the path structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. * @inode_rmdir: * Check the permission to remove a directory. * @dir contains the inode structure of parent of the directory to be removed. * @dentry contains the dentry structure of directory to be removed. * Return 0 if permission is granted. + * @path_rmdir: + * Check the permission to remove a directory. + * @dir contains the path structure of parent of the directory to be + * removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. 
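The path_* hooks documented here hand the LSM a struct path, i.e. dentry plus vfsmount, instead of a bare inode. A hedged sketch of a module wiring up two of them; the signatures follow the CONFIG_SECURITY_PATH members added to struct security_operations later in this diff, and the example_* names and trivial policy are made up:

static int example_path_unlink(struct path *dir, struct dentry *dentry)
{
	/* a real module would evaluate the full path against its policy */
	return 0;				/* 0 grants permission */
}

static int example_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
	return 0;
}

static struct security_operations example_security_ops = {
	.name		= "example",
#ifdef CONFIG_SECURITY_PATH
	.path_unlink	= example_path_unlink,
	.path_mkdir	= example_path_mkdir,
#endif
};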
* @inode_mknod: * Check permissions when creating a special file (or a socket or a fifo * file created via the mknod system call). Note that if mknod operation @@ -378,6 +403,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @mode contains the mode of the new file. * @dev contains the device number. * Return 0 if permission is granted. + * @path_mknod: + * Check permissions when creating a file. Note that this hook is called + * even if mknod operation is being done for a regular file. + * @dir contains the path structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the undecoded device number. Use new_decode_dev() to get + * the decoded device number. + * Return 0 if permission is granted. * @inode_rename: * Check for permission to rename a file or directory. * @old_dir contains the inode structure for parent of the old link. @@ -385,6 +419,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new_dir contains the inode structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. + * @path_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the path structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the path structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -413,6 +454,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure for the file. * @attr is the iattr structure containing the new file attributes. * Return 0 if permission is granted. + * @path_truncate: + * Check permission before truncating a file. + * @path contains the path structure for the file. + * @length is the new length of the file. + * @time_attrs is the flags passed to do_truncate(). + * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. * @mnt is the vfsmount where the dentry was looked up @@ -585,15 +632,31 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * manual page for definitions of the @clone_flags. * @clone_flags contains the flags indicating what should be shared. * Return 0 if permission is granted. - * @task_alloc_security: - * @p contains the task_struct for child process. - * Allocate and attach a security structure to the p->security field. The - * security field is initialized to NULL when the task structure is - * allocated. - * Return 0 if operation was successful. - * @task_free_security: - * @p contains the task_struct for process. - * Deallocate and clear the p->security field. + * @cred_free: + * @cred points to the credentials. + * Deallocate and clear the cred->security field in a set of credentials. + * @cred_prepare: + * @new points to the new credentials. + * @old points to the original credentials. + * @gfp indicates the atomicity of any memory allocations. + * Prepare a new set of credentials by copying the data from the old set. + * @cred_commit: + * @new points to the new credentials. + * @old points to the original credentials. + * Install a new set of credentials. 
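The cred_prepare/cred_commit pair mirrors how credentials are changed throughout this series: copy the current set, modify the copy, then install it. A sketch of that calling sequence, assuming the prepare_creds()/commit_creds() helpers from the companion linux/cred.h (not part of this diff):

static int drop_fsuid_to(uid_t uid)
{
	struct cred *new = prepare_creds();	/* copies current->cred, calls ->cred_prepare */

	if (!new)
		return -ENOMEM;

	new->fsuid = uid;			/* modify only the private copy */
	return commit_creds(new);		/* installs it, calls ->cred_commit */
}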
+ * @kernel_act_as: + * Set the credentials for a kernel service to act as (subjective context). + * @new points to the credentials to be modified. + * @secid specifies the security ID to be set + * The current task must be the one that nominated @secid. + * Return 0 if successful. + * @kernel_create_files_as: + * Set the file creation context in a set of credentials to be the same as + * the objective context of the specified inode. + * @new points to the credentials to be modified. + * @inode points to the inode to use as a reference. + * The current task must be the one that nominated @inode. + * Return 0 if successful. * @task_setuid: * Check permission before setting one or more of the user identity * attributes of the current process. The @flags parameter indicates @@ -606,15 +669,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @id2 contains a uid. * @flags contains one of the LSM_SETID_* values. * Return 0 if permission is granted. - * @task_post_setuid: + * @task_fix_setuid: * Update the module's state after setting one or more of the user * identity attributes of the current process. The @flags parameter * indicates which of the set*uid system calls invoked this hook. If - * @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other - * parameters are not used. - * @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS). - * @old_euid contains the old effective uid (or -1 if LSM_SETID_FS). - * @old_suid contains the old saved uid (or -1 if LSM_SETID_FS). + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaces * @flags contains one of the LSM_SETID_* values. * Return 0 on success. * @task_setgid: @@ -717,13 +778,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @arg3 contains a argument. * @arg4 contains a argument. * @arg5 contains a argument. - * @rc_p contains a pointer to communicate back the forced return code - * Return 0 if permission is granted, and non-zero if the security module - * has taken responsibility (setting *rc_p) for the prctl call. - * @task_reparent_to_init: - * Set the security attributes in @p->security for a kernel thread that - * is being reparented to the init task. - * @p contains the task_struct for the kernel thread. + * Return -ENOSYS if no-one wanted to handle this op, any other value to + * cause prctl() to return immediately with that value. * @task_to_inode: * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. @@ -1000,7 +1056,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * See whether a specific operational right is granted to a process on a * key. * @key_ref refers to the key (key pointer + possession attribute bit). - * @context points to the process to provide the context against which to + * @cred points to the credentials to provide the context against which to * evaluate the security data on the key. * @perm describes the combination of permissions required of this key. * Return 1 if permission granted, 0 if permission denied and -ve it the @@ -1162,6 +1218,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @child process. 
* Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of + * tracing check during an execve in the bprm_set_creds hook of * binprm_security_ops if the process is being traced and its security * attributes would be changed by the execve. * @child contains the task_struct structure for the target process. @@ -1185,33 +1242,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. * Return 0 if the capability sets were successfully obtained. - * @capset_check: - * Check permission before setting the @effective, @inheritable, and - * @permitted capability sets for the @target process. - * Caveat: @target is also set to current if a set of processes is - * specified (i.e. all processes other than current and init or a - * particular process group). Hence, the capset_set hook may need to - * revalidate permission to the actual target process. - * @target contains the task_struct structure for target process. - * @effective contains the effective capability set. - * @inheritable contains the inheritable capability set. - * @permitted contains the permitted capability set. - * Return 0 if permission is granted. - * @capset_set: + * @capset: * Set the @effective, @inheritable, and @permitted capability sets for - * the @target process. Since capset_check cannot always check permission - * to the real @target process, this hook may also perform permission - * checking to determine if the current process is allowed to set the - * capability sets of the @target process. However, this hook has no way - * of returning an error due to the structure of the sys_capset code. - * @target contains the task_struct structure for target process. + * the current process. + * @new contains the new credentials structure for target process. + * @old contains the current credentials structure for target process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. + * Return 0 and update @new if permission is granted. * @capable: - * Check whether the @tsk process has the @cap capability. + * Check whether the @tsk process has the @cap capability in the indicated + * credentials. * @tsk contains the task_struct for the process. + * @cred contains the credentials to use. * @cap contains the capability <include/linux/capability.h>. + * @audit: Whether to write an audit message or not * Return 0 if the capability is granted for @tsk. * @acct: * Check permission before enabling or disabling process accounting. 
If @@ -1299,15 +1345,13 @@ struct security_operations { int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); - int (*capset_check) (struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); - void (*capset_set) (struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); - int (*capable) (struct task_struct *tsk, int cap); + int (*capset) (struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); + int (*capable) (struct task_struct *tsk, const struct cred *cred, + int cap, int audit); int (*acct) (struct file *file); int (*sysctl) (struct ctl_table *table, int op); int (*quotactl) (int cmds, int type, int id, struct super_block *sb); @@ -1316,18 +1360,16 @@ struct security_operations { int (*settime) (struct timespec *ts, struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); - int (*bprm_alloc_security) (struct linux_binprm *bprm); - void (*bprm_free_security) (struct linux_binprm *bprm); - void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe); - void (*bprm_post_apply_creds) (struct linux_binprm *bprm); - int (*bprm_set_security) (struct linux_binprm *bprm); + int (*bprm_set_creds) (struct linux_binprm *bprm); int (*bprm_check_security) (struct linux_binprm *bprm); int (*bprm_secureexec) (struct linux_binprm *bprm); + void (*bprm_committing_creds) (struct linux_binprm *bprm); + void (*bprm_committed_creds) (struct linux_binprm *bprm); int (*sb_alloc_security) (struct super_block *sb); void (*sb_free_security) (struct super_block *sb); int (*sb_copy_data) (char *orig, char *copy); - int (*sb_kern_mount) (struct super_block *sb, void *data); + int (*sb_kern_mount) (struct super_block *sb, int flags, void *data); int (*sb_show_options) (struct seq_file *m, struct super_block *sb); int (*sb_statfs) (struct dentry *dentry); int (*sb_mount) (char *dev_name, struct path *path, @@ -1350,6 +1392,22 @@ struct security_operations { struct super_block *newsb); int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink) (struct path *dir, struct dentry *dentry); + int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode); + int (*path_rmdir) (struct path *dir, struct dentry *dentry); + int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); + int (*path_truncate) (struct path *path, loff_t length, + unsigned int time_attrs); + int (*path_symlink) (struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link) (struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#endif + int (*inode_alloc_security) (struct inode *inode); void (*inode_free_security) (struct inode *inode); int (*inode_init_security) (struct inode *inode, struct inode *dir, @@ -1406,14 +1464,18 @@ struct security_operations { int (*file_send_sigiotask) (struct task_struct *tsk, struct fown_struct *fown, int sig); int (*file_receive) (struct file *file); - int (*dentry_open) (struct file *file); + int (*dentry_open) (struct file *file, const struct cred *cred); int (*task_create) (unsigned long clone_flags); - int (*task_alloc_security) (struct task_struct *p); - 
void (*task_free_security) (struct task_struct *p); + void (*cred_free) (struct cred *cred); + int (*cred_prepare)(struct cred *new, const struct cred *old, + gfp_t gfp); + void (*cred_commit)(struct cred *new, const struct cred *old); + int (*kernel_act_as)(struct cred *new, u32 secid); + int (*kernel_create_files_as)(struct cred *new, struct inode *inode); int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); - int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ , - uid_t old_euid, uid_t old_suid, int flags); + int (*task_fix_setuid) (struct cred *new, const struct cred *old, + int flags); int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags); int (*task_setpgid) (struct task_struct *p, pid_t pgid); int (*task_getpgid) (struct task_struct *p); @@ -1433,8 +1495,7 @@ struct security_operations { int (*task_wait) (struct task_struct *p); int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5, long *rc_p); - void (*task_reparent_to_init) (struct task_struct *p); + unsigned long arg5); void (*task_to_inode) (struct task_struct *p, struct inode *inode); int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); @@ -1539,10 +1600,10 @@ struct security_operations { /* key management security hooks */ #ifdef CONFIG_KEYS - int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags); + int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags); void (*key_free) (struct key *key); int (*key_permission) (key_ref_t key_ref, - struct task_struct *context, + const struct cred *cred, key_perm_t perm); int (*key_getsecurity)(struct key *key, char **_buffer); #endif /* CONFIG_KEYS */ @@ -1568,15 +1629,13 @@ int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); -void security_capset_set(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); -int security_capable(struct task_struct *tsk, int cap); +int security_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); +int security_capable(int cap); +int security_real_capable(struct task_struct *tsk, int cap); +int security_real_capable_noaudit(struct task_struct *tsk, int cap); int security_acct(struct file *file); int security_sysctl(struct ctl_table *table, int op); int security_quotactl(int cmds, int type, int id, struct super_block *sb); @@ -1586,17 +1645,15 @@ int security_settime(struct timespec *ts, struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_vm_enough_memory_kern(long pages); -int security_bprm_alloc(struct linux_binprm *bprm); -void security_bprm_free(struct linux_binprm *bprm); -void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); -void security_bprm_post_apply_creds(struct linux_binprm *bprm); -int security_bprm_set(struct linux_binprm *bprm); +int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); +void security_bprm_committing_creds(struct linux_binprm *bprm); +void security_bprm_committed_creds(struct linux_binprm *bprm); int security_bprm_secureexec(struct linux_binprm *bprm); int 
security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); int security_sb_copy_data(char *orig, char *copy); -int security_sb_kern_mount(struct super_block *sb, void *data); +int security_sb_kern_mount(struct super_block *sb, int flags, void *data); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(char *dev_name, struct path *path, @@ -1663,13 +1720,16 @@ int security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); -int security_dentry_open(struct file *file); +int security_dentry_open(struct file *file, const struct cred *cred); int security_task_create(unsigned long clone_flags); -int security_task_alloc(struct task_struct *p); -void security_task_free(struct task_struct *p); +void security_cred_free(struct cred *cred); +int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); +void security_commit_creds(struct cred *new, const struct cred *old); +int security_kernel_act_as(struct cred *new, u32 secid); +int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); -int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags); +int security_task_fix_setuid(struct cred *new, const struct cred *old, + int flags); int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); @@ -1688,8 +1748,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid); int security_task_wait(struct task_struct *p); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p); -void security_task_reparent_to_init(struct task_struct *p); + unsigned long arg4, unsigned long arg5); void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); @@ -1764,25 +1823,40 @@ static inline int security_capget(struct task_struct *target, return cap_capget(target, effective, inheritable, permitted); } -static inline int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +static inline int security_capset(struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) { - return cap_capset_check(target, effective, inheritable, permitted); + return cap_capset(new, old, effective, inheritable, permitted); } -static inline void security_capset_set(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +static inline int security_capable(int cap) { - cap_capset_set(target, effective, inheritable, permitted); + return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT); } -static inline int security_capable(struct task_struct *tsk, int cap) +static inline int security_real_capable(struct task_struct *tsk, int cap) { - return cap_capable(tsk, cap); + int ret; + + rcu_read_lock(); + ret = cap_capable(tsk, __task_cred(tsk), cap, SECURITY_CAP_AUDIT); + 
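/* __task_cred() returns an RCU-protected pointer to the target task's credentials, so it may only be dereferenced inside this rcu_read_lock() section. */ +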
rcu_read_unlock(); + return ret; +} + +static inline +int security_real_capable_noaudit(struct task_struct *tsk, int cap) +{ + int ret; + + rcu_read_lock(); + ret = cap_capable(tsk, __task_cred(tsk), cap, + SECURITY_CAP_NOAUDIT); + rcu_read_unlock(); + return ret; } static inline int security_acct(struct file *file) @@ -1835,32 +1909,22 @@ static inline int security_vm_enough_memory_kern(long pages) return cap_vm_enough_memory(current->mm, pages); } -static inline int security_bprm_alloc(struct linux_binprm *bprm) +static inline int security_bprm_set_creds(struct linux_binprm *bprm) { - return 0; + return cap_bprm_set_creds(bprm); } -static inline void security_bprm_free(struct linux_binprm *bprm) -{ } - -static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) -{ - cap_bprm_apply_creds(bprm, unsafe); -} - -static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm) +static inline int security_bprm_check(struct linux_binprm *bprm) { - return; + return 0; } -static inline int security_bprm_set(struct linux_binprm *bprm) +static inline void security_bprm_committing_creds(struct linux_binprm *bprm) { - return cap_bprm_set_security(bprm); } -static inline int security_bprm_check(struct linux_binprm *bprm) +static inline void security_bprm_committed_creds(struct linux_binprm *bprm) { - return 0; } static inline int security_bprm_secureexec(struct linux_binprm *bprm) @@ -1881,7 +1945,7 @@ static inline int security_sb_copy_data(char *orig, char *copy) return 0; } -static inline int security_sb_kern_mount(struct super_block *sb, void *data) +static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { return 0; } @@ -2177,7 +2241,8 @@ static inline int security_file_receive(struct file *file) return 0; } -static inline int security_dentry_open(struct file *file) +static inline int security_dentry_open(struct file *file, + const struct cred *cred) { return 0; } @@ -2187,13 +2252,31 @@ static inline int security_task_create(unsigned long clone_flags) return 0; } -static inline int security_task_alloc(struct task_struct *p) +static inline void security_cred_free(struct cred *cred) +{ } + +static inline int security_prepare_creds(struct cred *new, + const struct cred *old, + gfp_t gfp) { return 0; } -static inline void security_task_free(struct task_struct *p) -{ } +static inline void security_commit_creds(struct cred *new, + const struct cred *old) +{ +} + +static inline int security_kernel_act_as(struct cred *cred, u32 secid) +{ + return 0; +} + +static inline int security_kernel_create_files_as(struct cred *cred, + struct inode *inode) +{ + return 0; +} static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) @@ -2201,10 +2284,11 @@ static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, return 0; } -static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags) +static inline int security_task_fix_setuid(struct cred *new, + const struct cred *old, + int flags) { - return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags); + return cap_task_fix_setuid(new, old, flags); } static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, @@ -2291,14 +2375,9 @@ static inline int security_task_wait(struct task_struct *p) static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5, long *rc_p) + unsigned long arg5) { - return cap_task_prctl(option, arg2, arg3, arg3, 
arg5, rc_p); -} - -static inline void security_task_reparent_to_init(struct task_struct *p) -{ - cap_task_reparent_to_init(p); + return cap_task_prctl(option, arg2, arg3, arg3, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) @@ -2721,19 +2800,84 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #endif /* CONFIG_SECURITY_NETWORK_XFRM */ +#ifdef CONFIG_SECURITY_PATH +int security_path_unlink(struct path *dir, struct dentry *dentry); +int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); +int security_path_rmdir(struct path *dir, struct dentry *dentry); +int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); +int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs); +int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name); +int security_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); +int security_path_rename(struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#else /* CONFIG_SECURITY_PATH */ +static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, + int mode) +{ + return 0; +} + +static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mknod(struct path *dir, struct dentry *dentry, + int mode, unsigned int dev) +{ + return 0; +} + +static inline int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs) +{ + return 0; +} + +static inline int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_path_link(struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_rename(struct path *old_dir, + struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} +#endif /* CONFIG_SECURITY_PATH */ + #ifdef CONFIG_KEYS #ifdef CONFIG_SECURITY -int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags); +int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); int security_key_permission(key_ref_t key_ref, - struct task_struct *context, key_perm_t perm); + const struct cred *cred, key_perm_t perm); int security_key_getsecurity(struct key *key, char **_buffer); #else static inline int security_key_alloc(struct key *key, - struct task_struct *tsk, + const struct cred *cred, unsigned long flags) { return 0; @@ -2744,7 +2888,7 @@ static inline void security_key_free(struct key *key) } static inline int security_key_permission(key_ref_t key_ref, - struct task_struct *context, + const struct cred *cred, key_perm_t perm) { return 0; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index dc50bcc282a..40ea5058c2e 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -34,6 +34,7 @@ struct seq_operations { #define SEQ_SKIP 1 +char *mangle_path(char *s, char *p, char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); loff_t seq_lseek(struct file *, loff_t, int); @@ -49,10 +50,11 @@ int seq_path(struct 
seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); -int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); -static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) { - return seq_bitmap(m, mask->bits, NR_CPUS); + return seq_bitmap(m, mask->bits, nr_cpu_ids); } static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ea8d9265bf..9136cc5608c 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -10,8 +10,9 @@ #ifndef _LINUX_SERIAL_H #define _LINUX_SERIAL_H -#ifdef __KERNEL__ #include <linux/types.h> + +#ifdef __KERNEL__ #include <asm/page.h> /* diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3d37c94abbc..d4d2a78ad43 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -28,6 +28,9 @@ struct plat_serial8250_port { unsigned char iotype; /* UPIO_* */ unsigned char hub6; upf_t flags; /* UPF_* flags */ + unsigned int type; /* If UPF_FIXED_TYPE */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); }; /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4e4f1277f3b..90bbbf0b116 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -40,7 +40,8 @@ #define PORT_NS16550A 14 #define PORT_XSCALE 15 #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ -#define PORT_MAX_8250 16 /* max port ID */ +#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ +#define PORT_MAX_8250 17 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed @@ -158,6 +159,11 @@ /* SH-SCI */ #define PORT_SCIFA 83 +#define PORT_S3C6400 84 + +/* NWPSERIAL */ +#define PORT_NWPSERIAL 85 + #ifdef __KERNEL__ #include <linux/compiler.h> @@ -246,6 +252,8 @@ struct uart_port { spinlock_t lock; /* port lock */ unsigned long iobase; /* in/out[bwl] */ unsigned char __iomem *membase; /* read/write[bwl] */ + unsigned int (*serial_in)(struct uart_port *, int); + void (*serial_out)(struct uart_port *, int, int); unsigned int irq; /* irq number */ unsigned int uartclk; /* base uart clock */ unsigned int fifosize; /* tx fifo size */ @@ -291,6 +299,8 @@ struct uart_port { #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) +/* The exact UART type is known and should not be probed. */ +#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) #define UPF_DEAD ((__force upf_t) (1 << 30)) @@ -314,35 +324,13 @@ struct uart_port { }; /* - * This is the state information which is persistent across opens. - * The low level driver must not to touch any elements contained - * within. 
- */ -struct uart_state { - unsigned int close_delay; /* msec */ - unsigned int closing_wait; /* msec */ - -#define USF_CLOSING_WAIT_INF (0) -#define USF_CLOSING_WAIT_NONE (~0U) - - int count; - int pm_state; - struct uart_info *info; - struct uart_port *port; - - struct mutex mutex; -}; - -#define UART_XMIT_SIZE PAGE_SIZE - -typedef unsigned int __bitwise__ uif_t; - -/* * This is the state information which is only valid when the port - * is open; it may be freed by the core driver once the device has + * is open; it may be cleared by the core driver once the device has * been closed. Either the low level driver or the core can modify * stuff here. */ +typedef unsigned int __bitwise__ uif_t; + struct uart_info { struct tty_port port; struct circ_buf xmit; @@ -364,6 +352,29 @@ struct uart_info { wait_queue_head_t delta_msr_wait; }; +/* + * This is the state information which is persistent across opens. + * The low level driver must not touch any elements contained + * within. + */ +struct uart_state { + unsigned int close_delay; /* msec */ + unsigned int closing_wait; /* msec */ + +#define USF_CLOSING_WAIT_INF (0) +#define USF_CLOSING_WAIT_NONE (~0U) + + int count; + int pm_state; + struct uart_info info; + struct uart_port *port; + + struct mutex mutex; +}; + +#define UART_XMIT_SIZE PAGE_SIZE + + /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 @@ -437,8 +448,13 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); #define uart_circ_chars_free(circ) \ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) -#define uart_tx_stopped(portp) \ - ((portp)->info->port.tty->stopped || (portp)->info->port.tty->hw_stopped) +static inline int uart_tx_stopped(struct uart_port *port) +{ + struct tty_struct *tty = port->info->port.tty; + if(tty->stopped || tty->hw_stopped) + return 1; + return 0; +} /* * The following are helper functions for the low level drivers. @@ -449,7 +465,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) #ifdef SUPPORT_SYSRQ if (port->sysrq) { if (ch && time_before(jiffies, port->sysrq)) { - handle_sysrq(ch, port->info ? port->info->port.tty : NULL); + handle_sysrq(ch, port->info->port.tty); port->sysrq = 0; return 1; } diff --git a/include/linux/serio.h b/include/linux/serio.h index 25641d9e0ea..1bcb357a01a 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -213,5 +213,6 @@ static inline void serio_unpin_driver(struct serio *serio) #define SERIO_ZHENHUA 0x36 #define SERIO_INEXIO 0x37 #define SERIO_TOUCHIT213 0x37 +#define SERIO_W8001 0x39 #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2725f4e5a9b..cf2cb50f77d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -250,6 +250,9 @@ typedef unsigned char *sk_buff_data_t; * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) * @do_not_encrypt: set to prevent encryption of this frame + * @requeue: set to indicate that the wireless core should attempt + * a software retry on this frame if we failed to + * receive an ACK for it * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -269,8 +272,9 @@ struct sk_buff { struct dst_entry *dst; struct rtable *rtable; }; +#ifdef CONFIG_XFRM struct sec_path *sp; - +#endif /* * This is the control buffer. It is free to use for every * layer. Please put your private variables there. 
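* (The TCP layer, for example, overlays its struct tcp_skb_cb on this area via the TCP_SKB_CB() macro.)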
If you @@ -325,6 +329,7 @@ struct sk_buff { #endif #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) __u8 do_not_encrypt:1; + __u8 requeue:1; #endif /* 0/13/14 bit hole */ @@ -488,6 +493,19 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, } /** + * skb_queue_is_first - check if skb is the first entry in the queue + * @list: queue head + * @skb: buffer + * + * Returns true if @skb is the first buffer on the list. + */ +static inline bool skb_queue_is_first(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + return (skb->prev == (struct sk_buff *) list); +} + +/** * skb_queue_next - return the next packet in the queue * @list: queue head * @skb: current buffer @@ -506,6 +524,24 @@ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, } /** + * skb_queue_prev - return the prev packet in the queue + * @list: queue head + * @skb: current buffer + * + * Return the prev packet in @list before @skb. It is only valid to + * call this if skb_queue_is_first() evaluates to false. + */ +static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, + const struct sk_buff *skb) +{ + /* This BUG_ON may seem severe, but if we just return then we + * are going to dereference garbage. + */ + BUG_ON(skb_queue_is_first(list, skb)); + return skb->prev; +} + +/** * skb_get - reference buffer * @skb: buffer to reference * @@ -1647,8 +1683,12 @@ extern int skb_splice_bits(struct sk_buff *skb, extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); +extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, + int shiftlen); extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); +extern int skb_gro_receive(struct sk_buff **head, + struct sk_buff *skb); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) @@ -1864,6 +1904,18 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu to->queue_mapping = from->queue_mapping; } +#ifdef CONFIG_XFRM +static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +{ + return skb->sp; +} +#else +static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +{ + return NULL; +} +#endif + static inline int skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; diff --git a/include/linux/slab.h b/include/linux/slab.h index 000da12b5cf..f96d13c281e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, * request comes from. */ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_track_caller(size_t, gfp_t, void*); +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ - __kmalloc_track_caller(size, flags, __builtin_return_address(0)) + __kmalloc_track_caller(size, flags, _RET_IP_) #else #define kmalloc_track_caller(size, flags) \ __kmalloc(size, flags) @@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*); * allocation request comes from. 
*/ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ - __builtin_return_address(0)) + _RET_IP_) #else #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node(size, flags, node) @@ -285,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); #define kmalloc_node_track_caller(size, flags, node) \ kmalloc_track_caller(size, flags) -#endif /* DEBUG_SLAB */ +#endif /* CONFIG_NUMA */ /* * Shortcuts diff --git a/include/linux/smp.h b/include/linux/smp.h index 6e7ba16ff45..b8246696810 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -21,6 +21,9 @@ struct call_single_data { u16 priv; }; +/* total number of cpus in this system (may exceed NR_CPUS) */ +extern unsigned int total_cpus; + #ifdef CONFIG_SMP #include <linux/preempt.h> @@ -64,15 +67,16 @@ extern void smp_cpus_done(unsigned int max_cpus); * Call a function on all other processors */ int smp_call_function(void(*func)(void *info), void *info, int wait); -/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */ -int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, - int wait); +void smp_call_function_many(const struct cpumask *mask, + void (*func)(void *info), void *info, bool wait); -static inline void smp_call_function_many(const struct cpumask *mask, - void (*func)(void *info), void *info, - int wait) +/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */ +static inline int +smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, + int wait) { - smp_call_function_mask(*mask, func, info, wait); + smp_call_function_many(&mask, func, info, wait); + return 0; } int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h new file mode 100644 index 00000000000..1cbf0313add --- /dev/null +++ b/include/linux/smsc911x.h @@ -0,0 +1,47 @@ +/*************************************************************************** + * + * Copyright (C) 2004-2008 SMSC + * Copyright (C) 2005-2008 ARM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + ***************************************************************************/ +#ifndef __LINUX_SMSC911X_H__ +#define __LINUX_SMSC911X_H__ + +#include <linux/phy.h> + +/* platform_device configuration data, should be assigned to + * the platform_device's dev.platform_data */ +struct smsc911x_platform_config { + unsigned int irq_polarity; + unsigned int irq_type; + unsigned int flags; + phy_interface_t phy_interface; +}; + +/* Constants for platform_device irq polarity configuration */ +#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0 +#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1 + +/* Constants for platform_device irq type configuration */ +#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0 +#define SMSC911X_IRQ_TYPE_PUSH_PULL 1 + +/* Constants for flags */ +#define SMSC911X_USE_16BIT (BIT(0)) +#define SMSC911X_USE_32BIT (BIT(1)) + +#endif /* __LINUX_SMSC911X_H__ */ diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 7a6e6bba4a7..aee3f1e1d1c 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -216,6 +216,9 @@ enum LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ + LINUX_MIB_SACKSHIFTED, + LINUX_MIB_SACKMERGED, + LINUX_MIB_SACKSHIFTFALLBACK, __LINUX_MIB_MAX }; diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index a3626aedaec..0f4eb165f25 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -1,9 +1,10 @@ #ifndef __LINUX_SPI_MMC_SPI_H #define __LINUX_SPI_MMC_SPI_H +#include <linux/device.h> +#include <linux/spi/spi.h> #include <linux/interrupt.h> -struct device; struct mmc_host; /* Put this in platform_data of a device being used to manage an MMC/SD @@ -41,4 +42,16 @@ struct mmc_spi_platform_data { void (*setpower)(struct device *, unsigned int maskval); }; +#ifdef CONFIG_OF +extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi); +extern void mmc_spi_put_pdata(struct spi_device *spi); +#else +static inline struct mmc_spi_platform_data * +mmc_spi_get_pdata(struct spi_device *spi) +{ + return spi->dev.platform_data; +} +static inline void mmc_spi_put_pdata(struct spi_device *spi) {} +#endif /* CONFIG_OF */ + #endif /* __LINUX_SPI_MMC_SPI_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4be01bb4437..68bb1c501d0 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -19,6 +19,8 @@ #ifndef __LINUX_SPI_H #define __LINUX_SPI_H +#include <linux/device.h> + /* * INTERFACES between SPI master-side drivers and SPI infrastructure. * (There's no SPI slave support for Linux yet...) @@ -325,9 +327,9 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped * @len: size of rx and tx buffers (in bytes) - * @speed_hz: Select a speed other then the device default for this + * @speed_hz: Select a speed other than the device default for this * transfer. If 0 the default (from @spi_device) is used. - * @bits_per_word: select a bits_per_word other then the device default + * @bits_per_word: select a bits_per_word other than the device default * for this transfer. If 0 the default (from @spi_device) is used. 
* @cs_change: affects chipselect after this transfer completes * @delay_usecs: microseconds to delay after this transfer before diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h new file mode 100644 index 00000000000..0f01a0f1f40 --- /dev/null +++ b/include/linux/spi/spi_gpio.h @@ -0,0 +1,60 @@ +#ifndef __LINUX_SPI_GPIO_H +#define __LINUX_SPI_GPIO_H + +/* + * For each bitbanged SPI bus, set up a platform_device node with: + * - name "spi_gpio" + * - id the same as the SPI bus number it implements + * - dev.platform data pointing to a struct spi_gpio_platform_data + * + * Or, see the driver code for information about speedups that are + * possible on platforms that support inlined access for GPIOs (no + * spi_gpio_platform_data is used). + * + * Use spi_board_info with these busses in the usual way, being sure + * that the controller_data being the GPIO used for each device's + * chipselect: + * + * static struct spi_board_info ... [] = { + * ... + * // this slave uses GPIO 42 for its chipselect + * .controller_data = (void *) 42, + * ... + * // this one uses GPIO 86 for its chipselect + * .controller_data = (void *) 86, + * ... + * }; + * + * If the bitbanged bus is later switched to a "native" controller, + * that platform_device and controller_data should be removed. + */ + +/** + * struct spi_gpio_platform_data - parameter for bitbanged SPI master + * @sck: number of the GPIO used for clock output + * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data + * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data + * @num_chipselect: how many slaves to allow + * + * All GPIO signals used with the SPI bus managed through this driver + * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead + * of some alternate function. + * + * It can be convenient to use this driver with pins that have alternate + * functions associated with a "native" SPI controller if a driver for that + * controller is not available, or is missing important functionality. + * + * On platforms which can do so, configure MISO with a weak pullup unless + * there's an external pullup on that signal. That saves power by avoiding + * floating signals. (A weak pulldown would save power too, but many + * drivers expect to see all-ones data as the no slave "response".) 
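+ *
+ * Purely as an illustration (device names and GPIO numbers are invented
+ * and not part of this header), a board file might describe one such
+ * bus as:
+ *
+ *	static struct spi_gpio_platform_data myboard_spi_gpio_pdata = {
+ *		.sck		= 40,
+ *		.mosi		= 41,
+ *		.miso		= 42,
+ *		.num_chipselect	= 1,
+ *	};
+ *
+ *	static struct platform_device myboard_spi_gpio_bus = {
+ *		.name	= "spi_gpio",
+ *		.id	= 1,
+ *		.dev	= {
+ *			.platform_data = &myboard_spi_gpio_pdata,
+ *		},
+ *	};
+ *
+ * where .id selects the SPI bus number the bitbanged bus will implement.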
+ */ +struct spi_gpio_platform_data { + unsigned sck; + unsigned mosi; + unsigned miso; + + u16 num_chipselect; +}; + +#endif /* __LINUX_SPI_GPIO_H */ diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index b106fd8e0d5..1a8cecc4f38 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +extern void save_stack_trace_user(struct stack_trace *trace); +#else +# define save_stack_trace_user(trace) do { } while (0) +#endif + #else # define save_stack_trace(trace) do { } while (0) # define save_stack_trace_tsk(tsk, trace) do { } while (0) +# define save_stack_trace_user(trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) #endif diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index faf1519b5ad..baba3a23a81 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -23,7 +23,7 @@ * * This can be thought of as a very heavy write lock, equivalent to * grabbing every spinlock in the kernel. */ -int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); +int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); /** * __stop_machine: freeze the machine on all CPUs and run this function @@ -34,11 +34,29 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); * Description: This is a special version of the above, which assumes cpus * won't come or go while it's being called. Used by hotplug cpu. */ -int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus); +int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); + +/** + * stop_machine_create: create all stop_machine threads + * + * Description: This causes all stop_machine threads to be created before + * stop_machine actually gets called. This can be used by subsystems that + * need a non failing stop_machine infrastructure. + */ +int stop_machine_create(void); + +/** + * stop_machine_destroy: destroy all stop_machine threads + * + * Description: This causes all stop_machine threads which were created with + * stop_machine_create to be destroyed again. 
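+ *
+ * Illustrative pairing with stop_machine() (callback and data names are
+ * invented, error handling trimmed):
+ *
+ *	err = stop_machine_create();
+ *	if (err)
+ *		return err;
+ *	err = stop_machine(my_fn, my_data, NULL);
+ *	stop_machine_destroy();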
+ */ +void stop_machine_destroy(void); + #else static inline int stop_machine(int (*fn)(void *), void *data, - const cpumask_t *cpus) + const struct cpumask *cpus) { int ret; local_irq_disable(); @@ -46,5 +64,9 @@ static inline int stop_machine(int (*fn)(void *), void *data, local_irq_enable(); return ret; } + +static inline int stop_machine_create(void) { return 0; } +static inline void stop_machine_destroy(void) { } + #endif /* CONFIG_SMP */ #endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6f0ee1b84a4..c39a21040dc 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -58,6 +58,7 @@ struct rpc_clnt { struct rpc_timeout cl_timeout_default; struct rpc_program * cl_program; char cl_inline_name[32]; + char *cl_principal; /* target to authenticate to */ }; /* @@ -108,6 +109,7 @@ struct rpc_create_args { u32 version; rpc_authflavor_t authflavor; unsigned long flags; + char *client_name; }; /* Values for "flags" field */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index 51b977a4ca2..cea764c2359 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -15,6 +15,7 @@ struct rpc_pipe_ops { ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); ssize_t (*downcall)(struct file *, const char __user *, size_t); void (*release_pipe)(struct inode *); + int (*open_pipe)(struct inode *); void (*destroy_msg)(struct rpc_pipe_msg *); }; diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3afe7fb403b..3435d24bfe5 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -58,10 +58,13 @@ struct svc_serv { struct svc_stat * sv_stats; /* RPC statistics */ spinlock_t sv_lock; unsigned int sv_nrthreads; /* # of server threads */ + unsigned int sv_maxconn; /* max connections allowed or + * '0' causing max to be based + * on number of threads. 
*/ + unsigned int sv_max_payload; /* datagram payload size */ unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ unsigned int sv_xdrsize; /* XDR buffer size */ - struct list_head sv_permsocks; /* all permanent sockets */ struct list_head sv_tempsocks; /* all temporary sockets */ int sv_tmpcnt; /* count of temporary sockets */ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 6fd7b016517..0127daca435 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -139,14 +139,14 @@ static inline char *__svc_print_addr(struct sockaddr *addr, { switch (addr->sa_family) { case AF_INET: - snprintf(buf, len, "%u.%u.%u.%u, port=%u", - NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), + snprintf(buf, len, "%pI4, port=%u", + &((struct sockaddr_in *)addr)->sin_addr, ntohs(((struct sockaddr_in *) addr)->sin_port)); break; case AF_INET6: - snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", - NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), + snprintf(buf, len, "%pI6, port=%u", + &((struct sockaddr_in6 *)addr)->sin6_addr, ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); break; diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index c9165d9771a..ca7d725861f 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,6 +20,7 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); u32 svcauth_gss_flavor(struct auth_domain *dom); +char *svc_gss_principal(struct svc_rqst *); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index e4057d729f0..49e1eb45446 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -37,21 +37,6 @@ struct xdr_netobj { typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); /* - * We're still requiring the BKL in the xdr code until it's been - * more carefully audited, at which point this wrapper will become - * unnecessary. - */ -static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj) -{ - int ret; - - lock_kernel(); - ret = xdrproc(rqstp, data, obj); - unlock_kernel(); - return ret; -} - -/* * Basic structure for transmission/reception of a client XDR message. * Features a header (for a linear buffer containing RPC headers * and the data payload for short messages), and then an array of diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 4d80a118d53..11fc71d50c1 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -76,8 +76,7 @@ struct rpc_rqst { struct list_head rq_list; __u32 * rq_buffer; /* XDR encode buffer */ - size_t rq_bufsize, - rq_callsize, + size_t rq_callsize, rq_rcvsize; struct xdr_buf rq_private_buf; /* The receive buffer diff --git a/include/linux/swab.h b/include/linux/swab.h index bbed279f3b3..be5284d4a05 100644 --- a/include/linux/swab.h +++ b/include/linux/swab.h @@ -9,17 +9,17 @@ * casts are necessary for constants, because we never know how for sure * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
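* For example, __swab16(0x1234) evaluates to the compile-time constant 0x3412 when its argument is itself constant, and otherwise goes through the inline helpers (or an __arch_swab16 override) defined below.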
*/ -#define __const_swab16(x) ((__u16)( \ +#define ___constant_swab16(x) ((__u16)( \ (((__u16)(x) & (__u16)0x00ffU) << 8) | \ (((__u16)(x) & (__u16)0xff00U) >> 8))) -#define __const_swab32(x) ((__u32)( \ +#define ___constant_swab32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ (((__u32)(x) & (__u32)0xff000000UL) >> 24))) -#define __const_swab64(x) ((__u64)( \ +#define ___constant_swab64(x) ((__u64)( \ (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ @@ -29,11 +29,11 @@ (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) -#define __const_swahw32(x) ((__u32)( \ +#define ___constant_swahw32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) -#define __const_swahb32(x) ((__u32)( \ +#define ___constant_swahb32(x) ((__u32)( \ (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) @@ -43,52 +43,52 @@ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 */ -static inline __attribute_const__ __u16 ___swab16(__u16 val) +static inline __attribute_const__ __u16 __fswab16(__u16 val) { #ifdef __arch_swab16 return __arch_swab16(val); #else - return __const_swab16(val); + return ___constant_swab16(val); #endif } -static inline __attribute_const__ __u32 ___swab32(__u32 val) +static inline __attribute_const__ __u32 __fswab32(__u32 val) { #ifdef __arch_swab32 return __arch_swab32(val); #else - return __const_swab32(val); + return ___constant_swab32(val); #endif } -static inline __attribute_const__ __u64 ___swab64(__u64 val) +static inline __attribute_const__ __u64 __fswab64(__u64 val) { #ifdef __arch_swab64 return __arch_swab64(val); #elif defined(__SWAB_64_THRU_32__) __u32 h = val >> 32; __u32 l = val & ((1ULL << 32) - 1); - return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); + return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); #else - return __const_swab64(val); + return ___constant_swab64(val); #endif } -static inline __attribute_const__ __u32 ___swahw32(__u32 val) +static inline __attribute_const__ __u32 __fswahw32(__u32 val) { #ifdef __arch_swahw32 return __arch_swahw32(val); #else - return __const_swahw32(val); + return ___constant_swahw32(val); #endif } -static inline __attribute_const__ __u32 ___swahb32(__u32 val) +static inline __attribute_const__ __u32 __fswahb32(__u32 val) { #ifdef __arch_swahb32 return __arch_swahb32(val); #else - return __const_swahb32(val); + return ___constant_swahb32(val); #endif } @@ -98,8 +98,8 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val) */ #define __swab16(x) \ (__builtin_constant_p((__u16)(x)) ? \ - __const_swab16((x)) : \ - ___swab16((x))) + ___constant_swab16(x) : \ + __fswab16(x)) /** * __swab32 - return a byteswapped 32-bit value @@ -107,8 +107,8 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val) */ #define __swab32(x) \ (__builtin_constant_p((__u32)(x)) ? \ - __const_swab32((x)) : \ - ___swab32((x))) + ___constant_swab32(x) : \ + __fswab32(x)) /** * __swab64 - return a byteswapped 64-bit value @@ -116,8 +116,8 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val) */ #define __swab64(x) \ (__builtin_constant_p((__u64)(x)) ? 
\ - __const_swab64((x)) : \ - ___swab64((x))) + ___constant_swab64(x) : \ + __fswab64(x)) /** * __swahw32 - return a word-swapped 32-bit value @@ -127,8 +127,8 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val) */ #define __swahw32(x) \ (__builtin_constant_p((__u32)(x)) ? \ - __const_swahw32((x)) : \ - ___swahw32((x))) + ___constant_swahw32(x) : \ + __fswahw32(x)) /** * __swahb32 - return a high and low byte-swapped 32-bit value @@ -138,8 +138,8 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val) */ #define __swahb32(x) \ (__builtin_constant_p((__u32)(x)) ? \ - __const_swahb32((x)) : \ - ___swahb32((x))) + ___constant_swahb32(x) : \ + __fswahb32(x)) /** * __swab16p - return a byteswapped 16-bit value from a pointer diff --git a/include/linux/swap.h b/include/linux/swap.h index a3af95b2cb6..d3021557887 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -120,7 +120,9 @@ struct swap_extent { enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ - SWP_ACTIVE = (SWP_USED | SWP_WRITEOK), + SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ + SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ + SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ /* add others here before... */ SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ }; @@ -134,22 +136,24 @@ enum { * The in-memory structure used to track swap areas. */ struct swap_info_struct { - unsigned int flags; + unsigned long flags; int prio; /* swap priority */ + int next; /* next entry on swap list */ struct file *swap_file; struct block_device *bdev; struct list_head extent_list; struct swap_extent *curr_swap_extent; - unsigned old_block_size; - unsigned short * swap_map; + unsigned short *swap_map; unsigned int lowest_bit; unsigned int highest_bit; + unsigned int lowest_alloc; /* while preparing discard cluster */ + unsigned int highest_alloc; /* while preparing discard cluster */ unsigned int cluster_next; unsigned int cluster_nr; unsigned int pages; unsigned int max; unsigned int inuse_pages; - int next; /* next entry on swap list */ + unsigned int old_block_size; }; struct swap_list_t { @@ -163,7 +167,6 @@ struct swap_list_t { /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; -extern long nr_swap_pages; extern unsigned int nr_free_buffer_pages(void); extern unsigned int nr_free_pagecache_pages(void); @@ -174,8 +177,6 @@ extern unsigned int nr_free_pagecache_pages(void); /* linux/mm/swap.c */ extern void __lru_cache_add(struct page *, enum lru_list lru); extern void lru_cache_add_lru(struct page *, enum lru_list lru); -extern void lru_cache_add_active_or_unevictable(struct page *, - struct vm_area_struct *); extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); @@ -213,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page) extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask); + gfp_t gfp_mask, bool noswap, + unsigned int swappiness); extern int __isolate_lru_page(struct page *page, int mode, int file); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; @@ -280,7 +282,7 @@ extern void end_swap_bio_read(struct bio *bio, int err); extern struct address_space swapper_space; #define total_swapcache_pages 
swapper_space.nrpages extern void show_swap_cache_info(void); -extern int add_to_swap(struct page *, gfp_t); +extern int add_to_swap(struct page *); extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); extern void __delete_from_swap_cache(struct page *); extern void delete_from_swap_cache(struct page *); @@ -293,6 +295,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr); /* linux/mm/swapfile.c */ +extern long nr_swap_pages; extern long total_swap_pages; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); @@ -300,15 +303,14 @@ extern swp_entry_t get_swap_page_of_type(int); extern int swap_duplicate(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); -extern void free_swap_and_cache(swp_entry_t); +extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); extern sector_t swapdev_block(int, pgoff_t); extern struct swap_info_struct *get_swap_info_struct(unsigned); -extern int can_share_swap_page(struct page *); -extern int remove_exclusive_swap_page(struct page *); -extern int remove_exclusive_swap_page_ref(struct page *); +extern int reuse_swap_page(struct page *); +extern int try_to_free_swap(struct page *); struct backing_dev_info; /* linux/mm/thrash.c */ @@ -332,9 +334,26 @@ static inline void disable_swap_token(void) put_swap_token(swap_token_mm); } +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); +#else +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) +{ +} +#endif +#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP +extern void mem_cgroup_uncharge_swap(swp_entry_t ent); +#else +static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) +{ +} +#endif + #else /* CONFIG_SWAP */ -#define total_swap_pages 0 +#define nr_swap_pages 0L +#define total_swap_pages 0L #define total_swapcache_pages 0UL #define si_swapinfo(val) \ @@ -350,14 +369,8 @@ static inline void show_swap_cache_info(void) { } -static inline void free_swap_and_cache(swp_entry_t swp) -{ -} - -static inline int swap_duplicate(swp_entry_t swp) -{ - return 0; -} +#define free_swap_and_cache(swp) is_migration_entry(swp) +#define swap_duplicate(swp) is_migration_entry(swp) static inline void swap_free(swp_entry_t swp) { @@ -374,7 +387,10 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp) return NULL; } -#define can_share_swap_page(p) (page_mapcount(p) == 1) +static inline int add_to_swap(struct page *page) +{ + return 0; +} static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) @@ -390,14 +406,9 @@ static inline void delete_from_swap_cache(struct page *page) { } -#define swap_token_default_timeout 0 +#define reuse_swap_page(page) (page_mapcount(page) == 1) -static inline int remove_exclusive_swap_page(struct page *p) -{ - return 0; -} - -static inline int remove_exclusive_swap_page_ref(struct page *page) +static inline int try_to_free_swap(struct page *page) { return 0; } @@ -415,6 +426,12 @@ static inline swp_entry_t get_swap_page(void) #define has_swap_token(x) 0 #define disable_swap_token() do { } while(0) +static inline int mem_cgroup_cache_charge_swapin(struct page *page, + struct mm_struct *mm, gfp_t mask, bool locked) +{ + return 0; +} + #endif /* CONFIG_SWAP */ 
#endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b18ec5533e8..dedd3c0cfe3 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -7,9 +7,32 @@ struct device; struct dma_attrs; struct scatterlist; +/* + * Maximum allowable number of contiguous slabs to map, + * must be a power of 2. What is the appropriate value ? + * The complexity of {map,unmap}_single is linearly dependent on this value. + */ +#define IO_TLB_SEGSIZE 128 + + +/* + * log of the size of each IO TLB slab. The number of slabs is command line + * controllable. + */ +#define IO_TLB_SHIFT 11 + extern void swiotlb_init(void); +extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); +extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); + +extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, + phys_addr_t address); +extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); + +extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); + extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 04fb47bfb92..18d0a243a7b 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -549,7 +549,7 @@ asmlinkage long sys_inotify_init(void); asmlinkage long sys_inotify_init1(int flags); asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask); -asmlinkage long sys_inotify_rm_watch(int fd, u32 wd); +asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd); asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus); diff --git a/include/linux/threads.h b/include/linux/threads.h index 38d1a5d6568..052b12bec8b 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -8,17 +8,17 @@ */ /* - * Maximum supported processors that can run under SMP. This value is - * set via configure setting. The maximum is equal to the size of the - * bitmasks used on that platform, i.e. 32 or 64. Setting this smaller - * saves quite a bit of memory. + * Maximum supported processors. Setting this smaller saves quite a + * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. */ -#ifdef CONFIG_SMP -#define NR_CPUS CONFIG_NR_CPUS -#else -#define NR_CPUS 1 +#ifndef CONFIG_NR_CPUS +/* FIXME: This should be fixed in the arch's Kconfig */ +#define CONFIG_NR_CPUS 1 #endif +/* Places which use this should consider cpumask_var_t. 
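(With CONFIG_CPUMASK_OFFSTACK, a cpumask_var_t is heap allocated via alloc_cpumask_var() instead of putting NR_CPUS bits on the stack.)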
*/ +#define NR_CPUS CONFIG_NR_CPUS + #define MIN_THREADS_LEFT_FOR_ROOT 4 /* diff --git a/include/linux/tick.h b/include/linux/tick.h index b6ec8189ac0..469b82d88b3 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -84,10 +84,10 @@ static inline void tick_cancel_sched_timer(int cpu) { } # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST extern struct tick_device *tick_get_broadcast_device(void); -extern cpumask_t *tick_get_broadcast_mask(void); +extern struct cpumask *tick_get_broadcast_mask(void); # ifdef CONFIG_TICK_ONESHOT -extern cpumask_t *tick_get_broadcast_oneshot_mask(void); +extern struct cpumask *tick_get_broadcast_oneshot_mask(void); # endif # endif /* BROADCAST */ diff --git a/include/linux/time.h b/include/linux/time.h index ce321ac5c8f..fbbd2a1c92b 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -105,6 +105,7 @@ extern unsigned long read_persistent_clock(void); extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); +extern int timekeeping_suspended; unsigned long get_seconds(void); struct timespec current_kernel_time(void); diff --git a/include/linux/timex.h b/include/linux/timex.h index 9007313b5b7..998a55d80ac 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -53,47 +53,11 @@ #ifndef _LINUX_TIMEX_H #define _LINUX_TIMEX_H -#include <linux/compiler.h> #include <linux/time.h> -#include <asm/param.h> - #define NTP_API 4 /* NTP API version */ /* - * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen - * for a slightly underdamped convergence characteristic. SHIFT_KH - * establishes the damping of the FLL and is chosen by wisdom and black - * art. - * - * MAXTC establishes the maximum time constant of the PLL. With the - * SHIFT_KG and SHIFT_KF values given and a time constant range from - * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, - * respectively. - */ -#define SHIFT_PLL 4 /* PLL frequency factor (shift) */ -#define SHIFT_FLL 2 /* FLL frequency factor (shift) */ -#define MAXTC 10 /* maximum time constant (shift) */ - -/* - * SHIFT_USEC defines the scaling (shift) of the time_freq and - * time_tolerance variables, which represent the current frequency - * offset and maximum frequency tolerance. - */ -#define SHIFT_USEC 16 /* frequency offset scale (shift) */ -#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) -#define PPM_SCALE_INV_SHIFT 19 -#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ - PPM_SCALE + 1) - -#define MAXPHASE 500000000l /* max phase error (ns) */ -#define MAXFREQ 500000 /* max frequency error (ns/s) */ -#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) -#define MINSEC 256 /* min interval between updates (s) */ -#define MAXSEC 2048 /* max interval between updates (s) */ -#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ - -/* * syscall interface - used (mainly by NTP daemon) * to discipline kernel clock oscillator */ @@ -199,9 +163,46 @@ struct timex { #define TIME_BAD TIME_ERROR /* bw compat */ #ifdef __KERNEL__ +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/param.h> + #include <asm/timex.h> /* + * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen + * for a slightly underdamped convergence characteristic. SHIFT_KH + * establishes the damping of the FLL and is chosen by wisdom and black + * art. + * + * MAXTC establishes the maximum time constant of the PLL. 
With the + * SHIFT_KG and SHIFT_KF values given and a time constant range from + * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, + * respectively. + */ +#define SHIFT_PLL 4 /* PLL frequency factor (shift) */ +#define SHIFT_FLL 2 /* FLL frequency factor (shift) */ +#define MAXTC 10 /* maximum time constant (shift) */ + +/* + * SHIFT_USEC defines the scaling (shift) of the time_freq and + * time_tolerance variables, which represent the current frequency + * offset and maximum frequency tolerance. + */ +#define SHIFT_USEC 16 /* frequency offset scale (shift) */ +#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) +#define PPM_SCALE_INV_SHIFT 19 +#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ + PPM_SCALE + 1) + +#define MAXPHASE 500000000l /* max phase error (ns) */ +#define MAXFREQ 500000 /* max frequency error (ns/s) */ +#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) +#define MINSEC 256 /* min interval between updates (s) */ +#define MAXSEC 2048 /* max interval between updates (s) */ +#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ + +/* * kernel variables * Note: maximum error = NTP synch distance = dispersion + delay / 2; * estimated error = NTP dispersion. diff --git a/include/linux/topology.h b/include/linux/topology.h index 117f1b7405c..e632d29f054 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -49,7 +49,7 @@ for_each_online_node(node) \ if (nr_cpus_node(node)) -void arch_update_cpu_topology(void); +int arch_update_cpu_topology(void); /* Conform to ACPI 2.0 SLIT distance definitions */ #define LOCAL_DISTANCE 10 @@ -125,7 +125,8 @@ void arch_update_cpu_topology(void); | SD_WAKE_AFFINE \ | SD_WAKE_BALANCE \ | SD_SHARE_PKG_RESOURCES\ - | BALANCE_FOR_MC_POWER, \ + | sd_balance_for_mc_power()\ + | sd_power_saving_flags(),\ .last_balance = jiffies, \ .balance_interval = 1, \ } @@ -150,7 +151,8 @@ void arch_update_cpu_topology(void); | SD_BALANCE_FORK \ | SD_WAKE_AFFINE \ | SD_WAKE_BALANCE \ - | BALANCE_FOR_PKG_POWER,\ + | sd_balance_for_package_power()\ + | sd_power_saving_flags(),\ .last_balance = jiffies, \ .balance_interval = 1, \ } diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index c5bb39c7a77..75700545836 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -24,8 +24,12 @@ struct tracepoint { const char *name; /* Tracepoint name */ int state; /* State. */ void **funcs; -} __attribute__((aligned(8))); - +} __attribute__((aligned(32))); /* + * Aligned on 32 bytes because it is + * globally visible and gcc happily + * align these on the structure size. + * Keep in sync with vmlinux.lds.h. + */ #define TPPROTO(args...) args #define TPARGS(args...) args @@ -40,14 +44,14 @@ struct tracepoint { do { \ void **it_func; \ \ - rcu_read_lock_sched(); \ + rcu_read_lock_sched_notrace(); \ it_func = rcu_dereference((tp)->funcs); \ if (it_func) { \ do { \ ((void(*)(proto))(*it_func))(args); \ } while (*(++it_func)); \ } \ - rcu_read_unlock_sched(); \ + rcu_read_unlock_sched_notrace(); \ } while (0) /* @@ -55,35 +59,40 @@ struct tracepoint { * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. 
*/ -#define DEFINE_TRACE(name, proto, args) \ +#define DECLARE_TRACE(name, proto, args) \ + extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ - static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) \ - = #name ":" #proto; \ - static struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"), aligned(8))) = \ - { __tpstrtab_##name, 0, NULL }; \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ TPPROTO(proto), TPARGS(args)); \ } \ static inline int register_trace_##name(void (*probe)(proto)) \ { \ - return tracepoint_probe_register(#name ":" #proto, \ - (void *)probe); \ + return tracepoint_probe_register(#name, (void *)probe); \ } \ - static inline void unregister_trace_##name(void (*probe)(proto))\ + static inline int unregister_trace_##name(void (*probe)(proto)) \ { \ - tracepoint_probe_unregister(#name ":" #proto, \ - (void *)probe); \ + return tracepoint_probe_unregister(#name, (void *)probe);\ } +#define DEFINE_TRACE(name) \ + static const char __tpstrtab_##name[] \ + __attribute__((section("__tracepoints_strings"))) = #name; \ + struct tracepoint __tracepoint_##name \ + __attribute__((section("__tracepoints"), aligned(32))) = \ + { __tpstrtab_##name, 0, NULL } + +#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ + EXPORT_SYMBOL_GPL(__tracepoint_##name) +#define EXPORT_TRACEPOINT_SYMBOL(name) \ + EXPORT_SYMBOL(__tracepoint_##name) + extern void tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end); #else /* !CONFIG_TRACEPOINTS */ -#define DEFINE_TRACE(name, proto, args) \ +#define DECLARE_TRACE(name, proto, args) \ static inline void _do_trace_##name(struct tracepoint *tp, proto) \ { } \ static inline void trace_##name(proto) \ @@ -92,8 +101,14 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin, { \ return -ENOSYS; \ } \ - static inline void unregister_trace_##name(void (*probe)(proto))\ - { } + static inline int unregister_trace_##name(void (*probe)(proto)) \ + { \ + return -ENOSYS; \ + } + +#define DEFINE_TRACE(name) +#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) +#define EXPORT_TRACEPOINT_SYMBOL(name) static inline void tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end) @@ -112,6 +127,10 @@ extern int tracepoint_probe_register(const char *name, void *probe); */ extern int tracepoint_probe_unregister(const char *name, void *probe); +extern int tracepoint_probe_register_noupdate(const char *name, void *probe); +extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); +extern void tracepoint_probe_update_all(void); + struct tracepoint_iter { struct module *module; struct tracepoint *tracepoint; diff --git a/include/linux/tty.h b/include/linux/tty.h index 3b8121d4e36..fc39db95499 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -180,8 +180,17 @@ struct signal_struct; * until a hangup so don't use the wrong path. 
*/ +struct tty_port; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + void (*raise_dtr_rts)(struct tty_port *port); +}; + struct tty_port { struct tty_struct *tty; /* Back pointer */ + const struct tty_port_operations *ops; /* Port operations */ spinlock_t lock; /* Lock protecting tty field */ int blocked_open; /* Waiting to open */ int count; /* Usage count */ @@ -253,6 +262,7 @@ struct tty_struct { unsigned int column; unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; unsigned char closing:1; + unsigned char echo_overrun:1; unsigned short minimum_to_wake; unsigned long overrun_time; int num_overrun; @@ -262,11 +272,16 @@ struct tty_struct { int read_tail; int read_cnt; unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))]; + unsigned char *echo_buf; + unsigned int echo_pos; + unsigned int echo_cnt; int canon_data; unsigned long canon_head; unsigned int canon_column; struct mutex atomic_read_lock; struct mutex atomic_write_lock; + struct mutex output_lock; + struct mutex echo_lock; unsigned char *write_buf; int write_cnt; spinlock_t read_lock; @@ -295,6 +310,7 @@ struct tty_struct { #define TTY_PUSH 6 /* n_tty private */ #define TTY_CLOSING 7 /* ->close() in progress */ #define TTY_LDISC 9 /* Line discipline attached */ +#define TTY_LDISC_CHANGING 10 /* Line discipline changing */ #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ #define TTY_PTY_LOCK 16 /* pty private */ @@ -325,7 +341,7 @@ extern struct class *tty_class; * go away */ -extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty) +static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) { if (tty) kref_get(&tty->kref); @@ -354,8 +370,7 @@ extern int tty_write_room(struct tty_struct *tty); extern void tty_driver_flush_buffer(struct tty_struct *tty); extern void tty_throttle(struct tty_struct *tty); extern void tty_unthrottle(struct tty_struct *tty); -extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, - struct winsize *ws); +extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); extern void tty_shutdown(struct tty_struct *tty); extern void tty_free_termios(struct tty_struct *tty); extern int is_current_pgrp_orphaned(void); @@ -421,6 +436,14 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port); extern void tty_port_free_xmit_buf(struct tty_port *port); extern struct tty_struct *tty_port_tty_get(struct tty_port *port); extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +extern int tty_port_carrier_raised(struct tty_port *port); +extern void tty_port_raise_dtr_rts(struct tty_port *port); +extern void tty_port_hangup(struct tty_port *port); +extern int tty_port_block_til_ready(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern int tty_port_close_start(struct tty_port *port, + struct tty_struct *tty, struct file *filp); +extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); @@ -442,6 +465,7 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size); extern void tty_audit_exit(void); extern void tty_audit_fork(struct signal_struct *sig); +extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); extern void tty_audit_push(struct tty_struct *tty); extern void 
tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid); @@ -450,6 +474,9 @@ static inline void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, size_t size) { } +static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) +{ +} static inline void tty_audit_exit(void) { } diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 78416b90158..08e088334db 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -196,8 +196,7 @@ * Optional: If not provided then the write method is called under * the atomic write lock to keep it serialized with the ldisc. * - * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, - * unsigned int rows, unsigned int cols); + * int (*resize)(struct tty_struct *tty, struct winsize *ws) * * Called when a termios request is issued which changes the * requested terminal geometry. @@ -258,8 +257,7 @@ struct tty_operations { int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); - int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, - struct winsize *ws); + int (*resize)(struct tty_struct *tty, struct winsize *ws); int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); diff --git a/include/linux/types.h b/include/linux/types.h index 1d98330b1f2..712ca53bc34 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -135,19 +135,14 @@ typedef __s64 int64_t; * * Linux always considers sectors to be 512 bytes long independently * of the devices real block size. + * + * blkcnt_t is the type of the inode's block count. */ #ifdef CONFIG_LBD typedef u64 sector_t; -#else -typedef unsigned long sector_t; -#endif - -/* - * The type of the inode's block count. 
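The tty_port_operations callbacks and the tty_port_* helpers added in the tty.h hunk above let drivers hand carrier detection and open/close blocking over to the core. A rough sketch of how a serial-type driver might wire them up; the myser_* driver names, register helpers and modem-control bits are hypothetical.

#include <linux/tty.h>

struct myser_port {				/* hypothetical per-port state */
	struct tty_port port;
	/* ... hardware specific fields ... */
};

static int myser_carrier_raised(struct tty_port *port)
{
	struct myser_port *mp = container_of(port, struct myser_port, port);

	return myser_read_msr(mp) & MYSER_MSR_DCD ? 1 : 0;	/* hypothetical */
}

static void myser_raise_dtr_rts(struct tty_port *port)
{
	struct myser_port *mp = container_of(port, struct myser_port, port);

	myser_set_mctrl(mp, MYSER_DTR | MYSER_RTS);		/* hypothetical */
}

static const struct tty_port_operations myser_port_ops = {
	.carrier_raised	= myser_carrier_raised,
	.raise_dtr_rts	= myser_raise_dtr_rts,
};

/* At probe time: tty_port_init(&mp->port); mp->port.ops = &myser_port_ops; */

static int myser_open(struct tty_struct *tty, struct file *filp)
{
	struct myser_port *mp = myser_find_port(tty);		/* hypothetical lookup */

	tty_port_tty_set(&mp->port, tty);
	/* Blocks until carrier is raised (or returns early for O_NONBLOCK). */
	return tty_port_block_til_ready(&mp->port, tty, filp);
}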
- */ -#ifdef CONFIG_LSF typedef u64 blkcnt_t; #else +typedef unsigned long sector_t; typedef unsigned long blkcnt_t; #endif @@ -181,10 +176,9 @@ typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; typedef __u32 __bitwise __le32; typedef __u32 __bitwise __be32; -#if defined(__GNUC__) typedef __u64 __bitwise __le64; typedef __u64 __bitwise __be64; -#endif + typedef __u16 __bitwise __sum16; typedef __u32 __bitwise __wsum; @@ -200,6 +194,16 @@ typedef u32 phys_addr_t; typedef phys_addr_t resource_size_t; +typedef struct { + volatile int counter; +} atomic_t; + +#ifdef CONFIG_64BIT +typedef struct { + volatile long counter; +} atomic64_t; +#endif + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index fec6decfb98..6b58367d145 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, \ set_fs(KERNEL_DS); \ pagefault_disable(); \ - ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ pagefault_enable(); \ set_fs(old_fs); \ ret; \ diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index cdf338d94b7..a0bb6bd2e5c 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -38,6 +38,24 @@ struct uio_mem { #define MAX_UIO_MAPS 5 +struct uio_portio; + +/** + * struct uio_port - description of a UIO port region + * @start: start of port region + * @size: size of port region + * @porttype: type of port (see UIO_PORT_* below) + * @portio: for use by the UIO core only. + */ +struct uio_port { + unsigned long start; + unsigned long size; + int porttype; + struct uio_portio *portio; +}; + +#define MAX_UIO_PORT_REGIONS 5 + struct uio_device; /** @@ -46,6 +64,7 @@ struct uio_device; * @name: device name * @version: device driver version * @mem: list of mappable memory regions, size==0 for end of list + * @port: list of port regions, size==0 for end of list * @irq: interrupt number or UIO_IRQ_CUSTOM * @irq_flags: flags for request_irq() * @priv: optional private data @@ -57,9 +76,10 @@ struct uio_device; */ struct uio_info { struct uio_device *uio_dev; - char *name; - char *version; + const char *name; + const char *version; struct uio_mem mem[MAX_UIO_MAPS]; + struct uio_port port[MAX_UIO_PORT_REGIONS]; long irq; unsigned long irq_flags; void *priv; @@ -92,4 +112,10 @@ extern void uio_event_notify(struct uio_info *info); #define UIO_MEM_LOGICAL 2 #define UIO_MEM_VIRTUAL 3 +/* defines for uio_port->porttype */ +#define UIO_PORT_NONE 0 +#define UIO_PORT_X86 1 +#define UIO_PORT_GPIO 2 +#define UIO_PORT_OTHER 3 + #endif /* _LINUX_UIO_DRIVER_H_ */ diff --git a/include/linux/unwind.h b/include/linux/unwind.h deleted file mode 100644 index 7760860fa17..00000000000 --- a/include/linux/unwind.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _LINUX_UNWIND_H -#define _LINUX_UNWIND_H - -/* - * Copyright (C) 2002-2006 Novell, Inc. - * Jan Beulich <jbeulich@novell.com> - * This code is released under version 2 of the GNU GPL. - * - * A simple API for unwinding kernel stacks. This is used for - * debugging and error reporting purposes. The kernel doesn't need - * full-blown stack unwinding with all the bells and whistles, so there - * is not much point in implementing the full Dwarf2 unwind API. 
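The new struct uio_port and the port[] array added to struct uio_info in the uio_driver.h hunk above let UIO drivers export I/O port regions alongside memory mappings. A minimal sketch of a driver filling one in; the device name, port base and size are arbitrary example values.

#include <linux/uio_driver.h>

static struct uio_info my_uio_info;	/* hypothetical driver-private instance */

static void my_uio_fill_info(long irq)
{
	my_uio_info.name    = "my_uio_dev";	/* hypothetical device name */
	my_uio_info.version = "0.1";
	my_uio_info.irq     = irq;

	/* First (and only) port region; a region with size == 0 ends the list. */
	my_uio_info.port[0].start    = 0x3f8;	/* example x86 I/O port base */
	my_uio_info.port[0].size     = 32;
	my_uio_info.port[0].porttype = UIO_PORT_X86;

	/* then register as usual with uio_register_device(parent, &my_uio_info) */
}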
- */ - -struct module; - -struct unwind_frame_info {}; - -static inline void unwind_init(void) {} -static inline void unwind_setup(void) {} - -#ifdef CONFIG_MODULES - -static inline void *unwind_add_table(struct module *mod, - const void *table_start, - unsigned long table_size) -{ - return NULL; -} - -static inline void unwind_remove_table(void *handle, int init_only) -{ -} - -#endif - -static inline int unwind_init_frame_info(struct unwind_frame_info *info, - struct task_struct *tsk, - const struct pt_regs *regs) -{ - return -ENOSYS; -} - -static inline int unwind_init_blocked(struct unwind_frame_info *info, - struct task_struct *tsk) -{ - return -ENOSYS; -} - -static inline int unwind_init_running(struct unwind_frame_info *info, - asmlinkage int (*cb)(struct unwind_frame_info *, - void *arg), - void *arg) -{ - return -ENOSYS; -} - -static inline int unwind(struct unwind_frame_info *info) -{ - return -ENOSYS; -} - -static inline int unwind_to_user(struct unwind_frame_info *info) -{ - return -ENOSYS; -} - -#endif /* _LINUX_UNWIND_H */ diff --git a/include/linux/usb.h b/include/linux/usb.h index f72aa51f7bc..85ee9be9361 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -108,6 +108,7 @@ enum usb_interface_condition { * (in probe()), bound to a driver, or unbinding (in disconnect()) * @is_active: flag set when the interface is bound and not suspended. * @sysfs_files_created: sysfs attributes exist + * @ep_devs_created: endpoint child pseudo-devices exist * @unregistering: flag set when the interface is being unregistered * @needs_remote_wakeup: flag set when the driver requires remote-wakeup * capability during autosuspend. @@ -120,6 +121,11 @@ enum usb_interface_condition { * to the sysfs representation for that device. * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not * allowed unless the counter is 0. + * @reset_ws: Used for scheduling resets from atomic context. + * @reset_running: set to 1 if the interface is currently running a + * queued reset so that usb_cancel_queued_reset() doesn't try to + * remove from the workqueue when running inside the worker + * thread. See __usb_queue_reset_device(). * * USB device drivers attach to interfaces on a physical device. 
Each * interface encapsulates a single high level function, such as feeding @@ -164,14 +170,17 @@ struct usb_interface { enum usb_interface_condition condition; /* state of binding */ unsigned is_active:1; /* the interface is not suspended */ unsigned sysfs_files_created:1; /* the sysfs attributes exist */ + unsigned ep_devs_created:1; /* endpoint "devices" exist */ unsigned unregistering:1; /* unregistration is in progress */ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ unsigned needs_binding:1; /* needs delayed unbind/rebind */ + unsigned reset_running:1; struct device dev; /* interface specific device info */ struct device *usb_dev; int pm_usage_cnt; /* usage counter for autosuspend */ + struct work_struct reset_ws; /* for resets in atomic context */ }; #define to_usb_interface(d) container_of(d, struct usb_interface, dev) #define interface_to_usbdev(intf) \ @@ -329,7 +338,7 @@ struct usb_bus { #endif struct device *dev; /* device for this bus */ -#if defined(CONFIG_USB_MON) +#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ int monitored; /* non-zero when monitored */ #endif @@ -398,6 +407,7 @@ struct usb_tt; * @urbnum: number of URBs submitted for the whole device * @active_duration: total time device is not suspended * @autosuspend: for delayed autosuspends + * @autoresume: for autoresumes requested while in_interrupt * @pm_mutex: protects PM operations * @last_busy: time of last use * @autosuspend_delay: in jiffies @@ -476,6 +486,7 @@ struct usb_device { #ifdef CONFIG_PM struct delayed_work autosuspend; + struct work_struct autoresume; struct mutex pm_mutex; unsigned long last_busy; @@ -505,6 +516,7 @@ extern int usb_lock_device_for_reset(struct usb_device *udev, /* USB port reset for device reinitialization */ extern int usb_reset_device(struct usb_device *dev); +extern void usb_queue_reset_device(struct usb_interface *dev); extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); @@ -513,6 +525,8 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id); extern int usb_autopm_set_interface(struct usb_interface *intf); extern int usb_autopm_get_interface(struct usb_interface *intf); extern void usb_autopm_put_interface(struct usb_interface *intf); +extern int usb_autopm_get_interface_async(struct usb_interface *intf); +extern void usb_autopm_put_interface_async(struct usb_interface *intf); static inline void usb_autopm_enable(struct usb_interface *intf) { @@ -539,8 +553,13 @@ static inline int usb_autopm_set_interface(struct usb_interface *intf) static inline int usb_autopm_get_interface(struct usb_interface *intf) { return 0; } +static inline int usb_autopm_get_interface_async(struct usb_interface *intf) +{ return 0; } + static inline void usb_autopm_put_interface(struct usb_interface *intf) { } +static inline void usb_autopm_put_interface_async(struct usb_interface *intf) +{ } static inline void usb_autopm_enable(struct usb_interface *intf) { } static inline void usb_autopm_disable(struct usb_interface *intf) @@ -1050,7 +1069,7 @@ struct usb_device_driver { void (*disconnect) (struct usb_device *udev); int (*suspend) (struct usb_device *udev, pm_message_t message); - int (*resume) (struct usb_device *udev); + int (*resume) (struct usb_device *udev, pm_message_t message); struct usbdrv_wrap drvwrap; unsigned int supports_autosuspend:1; }; @@ -1321,7 +1340,7 @@ struct urb { struct kref 
kref; /* reference count of the URB */ void *hcpriv; /* private data for host controller */ atomic_t use_count; /* concurrent submissions counter */ - u8 reject; /* submissions will fail */ + atomic_t reject; /* submissions will fail */ int unlinked; /* unlink error code */ /* public: documented fields in the urb that can be used by drivers */ @@ -1466,6 +1485,7 @@ extern void usb_poison_urb(struct urb *urb); extern void usb_unpoison_urb(struct urb *urb); extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); extern void usb_poison_anchored_urbs(struct usb_anchor *anchor); +extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor); extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); extern void usb_unanchor_urb(struct urb *urb); @@ -1722,10 +1742,6 @@ extern void usb_unregister_notify(struct notifier_block *nb); #define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \ format "\n" , ## arg) -#define info(format, arg...) printk(KERN_INFO KBUILD_MODNAME ": " \ - format "\n" , ## arg) -#define warn(format, arg...) printk(KERN_WARNING KBUILD_MODNAME ": " \ - format "\n" , ## arg) #endif /* __KERNEL__ */ diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h index 07c5e3cf589..0a4a18b3c1b 100644 --- a/include/linux/usb/association.h +++ b/include/linux/usb/association.h @@ -28,17 +28,17 @@ struct wusb_am_attr { }; /* Different fields defined by the spec */ -#define WUSB_AR_AssociationTypeId { .id = 0x0000, .len = 2 } -#define WUSB_AR_AssociationSubTypeId { .id = 0x0001, .len = 2 } -#define WUSB_AR_Length { .id = 0x0002, .len = 4 } -#define WUSB_AR_AssociationStatus { .id = 0x0004, .len = 4 } -#define WUSB_AR_LangID { .id = 0x0008, .len = 2 } -#define WUSB_AR_DeviceFriendlyName { .id = 0x000b, .len = 64 } /* max */ -#define WUSB_AR_HostFriendlyName { .id = 0x000c, .len = 64 } /* max */ -#define WUSB_AR_CHID { .id = 0x1000, .len = 16 } -#define WUSB_AR_CDID { .id = 0x1001, .len = 16 } -#define WUSB_AR_ConnectionContext { .id = 0x1002, .len = 48 } -#define WUSB_AR_BandGroups { .id = 0x1004, .len = 2 } +#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) } +#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) } +#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) } +#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) } +#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) } +#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */ +#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) } +#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) } +#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) } +#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) } /* CBAF Control Requests (AMS1.0[T4-1] */ enum { diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h new file mode 100644 index 00000000000..d9f03ccc2d6 --- /dev/null +++ b/include/linux/usb/gpio_vbus.h @@ -0,0 +1,30 @@ +/* + * A simple GPIO VBUS sensing driver for B peripheral only devices + * with internal transceivers. + * Optionally D+ pullup can be controlled by a second GPIO. 
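usb_queue_reset_device() and the *_async autosuspend helpers added in the usb.h hunk above are intended for contexts that cannot sleep (URB completion handlers, timers), where usb_reset_device() and usb_autopm_get_interface() are not allowed. A rough sketch of a completion handler using them; the mydrv driver structure is hypothetical.

#include <linux/usb.h>

struct mydrv {				/* hypothetical per-interface state */
	struct usb_interface *intf;
};

static void mydrv_irq_complete(struct urb *urb)
{
	struct mydrv *dev = urb->context;
	int status = urb->status;

	if (status == -EPIPE || status == -EPROTO) {
		/*
		 * Can't call usb_reset_device() here (atomic context);
		 * queue the reset and let the core run it from a work item.
		 */
		usb_queue_reset_device(dev->intf);
		return;
	}

	/* Bump the PM count without sleeping; resume happens asynchronously. */
	if (usb_autopm_get_interface_async(dev->intf) == 0) {
		/* ... kick off more I/O ... */
		usb_autopm_put_interface_async(dev->intf);
	}

	usb_submit_urb(urb, GFP_ATOMIC);
}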
+ * + * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/** + * struct gpio_vbus_mach_info - configuration for gpio_vbus + * @gpio_vbus: VBUS sensing GPIO + * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid) + * @gpio_vbus_inverted: true if gpio_vbus is active low + * @gpio_pullup_inverted: true if gpio_pullup is active low + * + * The VBUS sensing GPIO should have a pulldown, which will normally be + * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a + * value the GPIO detects as active. Some systems will use comparators. + */ +struct gpio_vbus_mach_info { + int gpio_vbus; + int gpio_pullup; + bool gpio_vbus_inverted; + bool gpio_pullup_inverted; +}; diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 630962c04ca..d6aad0ea603 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -47,6 +47,11 @@ struct musb_hdrc_config { u8 ram_bits; /* ram address size */ struct musb_hdrc_eps_bits *eps_bits; +#ifdef CONFIG_BLACKFIN + /* A GPIO controlling VRSEL in Blackfin */ + unsigned int gpio_vrsel; +#endif + }; struct musb_hdrc_platform_data { diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 1db25d152ad..94df4fe6c6c 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -84,6 +84,7 @@ extern int otg_set_transceiver(struct otg_transceiver *); /* for usb host and peripheral controller drivers */ extern struct otg_transceiver *otg_get_transceiver(void); +extern void otg_put_transceiver(struct otg_transceiver *); static inline int otg_start_hnp(struct otg_transceiver *otg) diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h index a102561e702..fb7c359bdfb 100644 --- a/include/linux/usb/wusb-wa.h +++ b/include/linux/usb/wusb-wa.h @@ -51,6 +51,7 @@ enum { WUSB_REQ_GET_TIME = 25, WUSB_REQ_SET_STREAM_IDX = 26, WUSB_REQ_SET_WUSB_MAS = 27, + WUSB_REQ_CHAN_STOP = 28, }; diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index d9a3bbe38e6..1eea1ab68dc 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -52,8 +52,11 @@ US_FLAG(MAX_SECTORS_MIN,0x00002000) \ /* Sets max_sectors to arch min */ \ US_FLAG(BULK_IGNORE_TAG,0x00004000) \ - /* Ignore tag mismatch in bulk operations */ - + /* Ignore tag mismatch in bulk operations */ \ + US_FLAG(SANE_SENSE, 0x00008000) \ + /* Sane Sense (> 18 bytes) */ \ + US_FLAG(CAPACITY_OK, 0x00010000) \ + /* READ CAPACITY response is correct */ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index b5f41d4c2ee..315bcd37522 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -12,7 +12,7 @@ struct user_namespace { struct kref kref; struct hlist_head uidhash_table[UIDHASH_SZ]; - struct user_struct *root_user; + struct user_struct *creator; }; extern struct user_namespace init_user_ns; @@ -26,8 +26,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns) return ns; } -extern struct user_namespace *copy_user_ns(int flags, - struct user_namespace *old_ns); +extern int create_user_ns(struct cred *new); extern void free_user_ns(struct kref *kref); static inline void put_user_ns(struct user_namespace *ns) @@ -43,13 +42,9 @@ static inline struct user_namespace 
*get_user_ns(struct user_namespace *ns) return &init_user_ns; } -static inline struct user_namespace *copy_user_ns(int flags, - struct user_namespace *old_ns) +static inline int create_user_ns(struct cred *new) { - if (flags & CLONE_NEWUSER) - return ERR_PTR(-EINVAL); - - return old_ns; + return -EINVAL; } static inline void put_user_ns(struct user_namespace *ns) diff --git a/include/linux/uwb.h b/include/linux/uwb.h index f9ccbd9a2ce..c02128991ff 100644 --- a/include/linux/uwb.h +++ b/include/linux/uwb.h @@ -30,6 +30,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/timer.h> +#include <linux/wait.h> #include <linux/workqueue.h> #include <linux/uwb/spec.h> @@ -66,6 +67,7 @@ struct uwb_dev { struct uwb_dev_addr dev_addr; int beacon_slot; DECLARE_BITMAP(streams, UWB_NUM_STREAMS); + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); }; #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) @@ -86,12 +88,31 @@ struct uwb_notifs_chain { struct mutex mutex; }; +/* Beacon cache list */ +struct uwb_beca { + struct list_head list; + size_t entries; + struct mutex mutex; +}; + +/* Event handling thread. */ +struct uwbd { + int pid; + struct task_struct *task; + wait_queue_head_t wq; + struct list_head event_list; + spinlock_t event_list_lock; +}; + /** * struct uwb_mas_bm - a bitmap of all MAS in a superframe * @bm: a bitmap of length #UWB_NUM_MAS */ struct uwb_mas_bm { DECLARE_BITMAP(bm, UWB_NUM_MAS); + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); + int safe; + int unsafe; }; /** @@ -117,14 +138,24 @@ struct uwb_mas_bm { * FIXME: further target states TBD. */ enum uwb_rsv_state { - UWB_RSV_STATE_NONE, + UWB_RSV_STATE_NONE = 0, UWB_RSV_STATE_O_INITIATED, UWB_RSV_STATE_O_PENDING, UWB_RSV_STATE_O_MODIFIED, UWB_RSV_STATE_O_ESTABLISHED, + UWB_RSV_STATE_O_TO_BE_MOVED, + UWB_RSV_STATE_O_MOVE_EXPANDING, + UWB_RSV_STATE_O_MOVE_COMBINING, + UWB_RSV_STATE_O_MOVE_REDUCING, UWB_RSV_STATE_T_ACCEPTED, UWB_RSV_STATE_T_DENIED, + UWB_RSV_STATE_T_CONFLICT, UWB_RSV_STATE_T_PENDING, + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, + UWB_RSV_STATE_T_EXPANDING_CONFLICT, + UWB_RSV_STATE_T_EXPANDING_PENDING, + UWB_RSV_STATE_T_EXPANDING_DENIED, + UWB_RSV_STATE_T_RESIZED, UWB_RSV_STATE_LAST, }; @@ -149,6 +180,12 @@ struct uwb_rsv_target { }; }; +struct uwb_rsv_move { + struct uwb_mas_bm final_mas; + struct uwb_ie_drp *companion_drp_ie; + struct uwb_mas_bm companion_mas; +}; + /* * Number of streams reserved for reservations targeted at DevAddrs. 
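The gpio_vbus_mach_info structure introduced in the new usb/gpio_vbus.h header above is board configuration handed to the GPIO VBUS transceiver driver as platform data. A sketch of what a board file might provide; the GPIO numbers are hypothetical and the "gpio-vbus" device name is an assumption about the matching platform driver.

#include <linux/platform_device.h>
#include <linux/usb/gpio_vbus.h>

static struct gpio_vbus_mach_info my_board_gpio_vbus_info = {
	.gpio_vbus            = 83,	/* hypothetical VBUS sense GPIO */
	.gpio_pullup          = 84,	/* hypothetical D+ pullup GPIO */
	.gpio_vbus_inverted   = false,
	.gpio_pullup_inverted = true,	/* pullup enabled by driving the line low */
};

static struct platform_device my_board_gpio_vbus_device = {
	.name = "gpio-vbus",		/* assumed driver name */
	.id   = -1,
	.dev  = {
		.platform_data = &my_board_gpio_vbus_info,
	},
};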
*/ @@ -186,6 +223,7 @@ typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); * * @status: negotiation status * @stream: stream index allocated for this reservation + * @tiebreaker: conflict tiebreaker for this reservation * @mas: reserved MAS * @drp_ie: the DRP IE * @ie_valid: true iff the DRP IE matches the reservation parameters @@ -201,25 +239,29 @@ struct uwb_rsv { struct uwb_rc *rc; struct list_head rc_node; struct list_head pal_node; + struct kref kref; struct uwb_dev *owner; struct uwb_rsv_target target; enum uwb_drp_type type; int max_mas; int min_mas; - int sparsity; + int max_interval; bool is_multicast; uwb_rsv_cb_f callback; void *pal_priv; enum uwb_rsv_state state; + bool needs_release_companion_mas; u8 stream; + u8 tiebreaker; struct uwb_mas_bm mas; struct uwb_ie_drp *drp_ie; + struct uwb_rsv_move mv; bool ie_valid; struct timer_list timer; - bool expired; + struct work_struct handle_timeout_work; }; static const @@ -261,6 +303,13 @@ struct uwb_drp_avail { bool ie_valid; }; +struct uwb_drp_backoff_win { + u8 window; + u8 n; + int total_expired; + struct timer_list timer; + bool can_reserve_extra_mases; +}; const char *uwb_rsv_state_str(enum uwb_rsv_state state); const char *uwb_rsv_type_str(enum uwb_drp_type type); @@ -276,6 +325,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv); void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); +void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); + /** * Radio Control Interface instance * @@ -337,23 +388,33 @@ struct uwb_rc { u8 ctx_roll; int beaconing; /* Beaconing state [channel number] */ + int beaconing_forced; int scanning; enum uwb_scan_type scan_type:3; unsigned ready:1; struct uwb_notifs_chain notifs_chain; + struct uwb_beca uwb_beca; + + struct uwbd uwbd; + struct uwb_drp_backoff_win bow; struct uwb_drp_avail drp_avail; struct list_head reservations; + struct list_head cnflt_alien_list; + struct uwb_mas_bm cnflt_alien_bitmap; struct mutex rsvs_mutex; + spinlock_t rsvs_lock; struct workqueue_struct *rsv_workq; - struct work_struct rsv_update_work; + struct delayed_work rsv_update_work; + struct delayed_work rsv_alien_bp_work; + int set_drp_ie_pending; struct mutex ies_mutex; struct uwb_rc_cmd_set_ie *ies; size_t ies_capacity; - spinlock_t pal_lock; struct list_head pals; + int active_pals; struct uwb_dbg *dbg; }; @@ -361,11 +422,19 @@ struct uwb_rc { /** * struct uwb_pal - a UWB PAL - * @name: descriptive name for this PAL (wushc, wlp, etc.). + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). * @device: a device for the PAL. Used to link the PAL and the radio * controller in sysfs. + * @rc: the radio controller the PAL uses. + * @channel_changed: called when the channel used by the radio changes. + * A channel of -1 means the channel has been stopped. * @new_rsv: called when a peer requests a reservation (may be NULL if * the PAL cannot accept reservation requests). + * @channel: channel being used by the PAL; 0 if the PAL isn't using + * the radio; -1 if the PAL wishes to use the radio but + * cannot. + * @debugfs_dir: a debugfs directory which the PAL can use for its own + * debugfs files. * * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). 
@@ -384,12 +453,21 @@ struct uwb_pal { struct list_head node; const char *name; struct device *device; - void (*new_rsv)(struct uwb_rsv *rsv); + struct uwb_rc *rc; + + void (*channel_changed)(struct uwb_pal *pal, int channel); + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); + + int channel; + struct dentry *debugfs_dir; }; void uwb_pal_init(struct uwb_pal *pal); -int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); -void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); +int uwb_pal_register(struct uwb_pal *pal); +void uwb_pal_unregister(struct uwb_pal *pal); + +int uwb_radio_start(struct uwb_pal *pal); +void uwb_radio_stop(struct uwb_pal *pal); /* * General public API @@ -443,8 +521,6 @@ ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, u8 expected_type, u16 expected_event, struct uwb_rceb **preply); -ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); -int uwb_bg_joined(struct uwb_rc *rc); size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); @@ -520,6 +596,8 @@ void uwb_rc_rm(struct uwb_rc *); void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); void uwb_rc_neh_error(struct uwb_rc *, int); void uwb_rc_reset_all(struct uwb_rc *rc); +void uwb_rc_pre_reset(struct uwb_rc *rc); +void uwb_rc_post_reset(struct uwb_rc *rc); /** * uwb_rsv_is_owner - is the owner of this reservation the RC? @@ -531,7 +609,9 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) } /** - * Events generated by UWB that can be passed to any listeners + * enum uwb_notifs - UWB events that can be passed to any listeners + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. * * Higher layers can register callback functions with the radio * controller using uwb_notifs_register(). The radio controller @@ -539,8 +619,6 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) * nodes when an event occurs. 
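With the uwb_pal changes above, a PAL now records its radio controller, supplies channel_changed/new_rsv callbacks, registers with uwb_pal_register(pal) (no separate rc argument), and asks for the radio with uwb_radio_start(). A minimal sketch; the "mypal" names are hypothetical.

#include <linux/uwb.h>

static void mypal_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel == -1 means the radio has stopped */
	if (channel < 0) {
		/* tear down any active links */
	}
}

static void mypal_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
{
	/* a peer requested a reservation: accept with uwb_rsv_accept() or ignore */
}

static struct uwb_pal my_pal;		/* hypothetical PAL instance */

static int mypal_setup(struct uwb_rc *rc, struct device *dev)
{
	int ret;

	uwb_pal_init(&my_pal);
	my_pal.name            = "mypal";
	my_pal.device          = dev;
	my_pal.rc              = rc;
	my_pal.channel_changed = mypal_channel_changed;
	my_pal.new_rsv         = mypal_new_rsv;

	ret = uwb_pal_register(&my_pal);
	if (ret)
		return ret;

	return uwb_radio_start(&my_pal);	/* request that the radio beacon */
}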
*/ enum uwb_notifs { - UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ - UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ UWB_NOTIF_ONAIR, UWB_NOTIF_OFFAIR, }; @@ -652,22 +730,9 @@ static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) /* Information Element handling */ -/* For representing the state of writing to a buffer when iterating */ -struct uwb_buf_ctx { - char *buf; - size_t bytes, size; -}; - -typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, - size_t, void *); struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); -ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, - const void *buf, size_t size); -int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, - size_t, void *); -int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); -struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); - +int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); +int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); /* * Transmission statistics diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h index 1141f41bab5..8da004e2562 100644 --- a/include/linux/uwb/debug-cmd.h +++ b/include/linux/uwb/debug-cmd.h @@ -32,6 +32,10 @@ enum uwb_dbg_cmd_type { UWB_DBG_CMD_RSV_ESTABLISH = 1, UWB_DBG_CMD_RSV_TERMINATE = 2, + UWB_DBG_CMD_IE_ADD = 3, + UWB_DBG_CMD_IE_RM = 4, + UWB_DBG_CMD_RADIO_START = 5, + UWB_DBG_CMD_RADIO_STOP = 6, }; struct uwb_dbg_cmd_rsv_establish { @@ -39,18 +43,25 @@ struct uwb_dbg_cmd_rsv_establish { __u8 type; __u16 max_mas; __u16 min_mas; - __u8 sparsity; + __u8 max_interval; }; struct uwb_dbg_cmd_rsv_terminate { int index; }; +struct uwb_dbg_cmd_ie { + __u8 data[128]; + int len; +}; + struct uwb_dbg_cmd { __u32 type; union { struct uwb_dbg_cmd_rsv_establish rsv_establish; struct uwb_dbg_cmd_rsv_terminate rsv_terminate; + struct uwb_dbg_cmd_ie ie_add; + struct uwb_dbg_cmd_ie ie_rm; }; }; diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h deleted file mode 100644 index a86a73fe303..00000000000 --- a/include/linux/uwb/debug.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Ultra Wide Band - * Debug Support - * - * Copyright (C) 2005-2006 Intel Corporation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - * - * - * FIXME: doc - * Invoke like: - * - * #define D_LOCAL 4 - * #include <linux/uwb/debug.h> - * - * At the end of your include files. 
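uwb_rc_ie_add() and uwb_rc_ie_rm() above replace the older uwb_rc_set_ie() interface: the caller hands the radio controller a fully formed IE (header included) and the stack merges it into the beacon. A sketch using a hypothetical application-specific IE, assuming the two-byte element_id/length IE header from uwb/spec.h.

#include <linux/uwb.h>
#include <linux/uwb/spec.h>

/* Hypothetical application-specific IE payload. */
struct my_app_ie {
	struct uwb_ie_hdr hdr;
	u8 data[4];
} __attribute__((packed));

static int my_ie_announce(struct uwb_rc *rc)
{
	struct my_app_ie ie = {
		.hdr = {
			.element_id = UWB_APP_SPEC_IE,
			.length     = sizeof(ie.data),
		},
		.data = { 0xde, 0xad, 0xbe, 0xef },	/* arbitrary payload */
	};

	/* Add the IE to the beacon; remove it later with uwb_rc_ie_rm(). */
	return uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));
}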
- */ -#include <linux/types.h> - -struct device; -extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); - -/* Master debug switch; !0 enables, 0 disables */ -#define D_MASTER (!0) - -/* Local (per-file) debug switch; #define before #including */ -#ifndef D_LOCAL -#define D_LOCAL 0 -#endif - -#undef __d_printf -#undef d_fnstart -#undef d_fnend -#undef d_printf -#undef d_dump - -#define __d_printf(l, _tag, _dev, f, a...) \ -do { \ - struct device *__dev = (_dev); \ - if (D_MASTER && D_LOCAL >= (l)) { \ - char __head[64] = ""; \ - if (_dev != NULL) { \ - if ((unsigned long)__dev < 4096) \ - printk(KERN_ERR "E: Corrupt dev %p\n", \ - __dev); \ - else \ - snprintf(__head, sizeof(__head), \ - "%s %s: ", \ - dev_driver_string(__dev), \ - __dev->bus_id); \ - } \ - printk(KERN_ERR "%s%s" _tag ": " f, __head, \ - __func__, ## a); \ - } \ -} while (0 && _dev) - -#define d_fnstart(l, _dev, f, a...) \ - __d_printf(l, " FNSTART", _dev, f, ## a) -#define d_fnend(l, _dev, f, a...) \ - __d_printf(l, " FNEND", _dev, f, ## a) -#define d_printf(l, _dev, f, a...) \ - __d_printf(l, "", _dev, f, ## a) -#define d_dump(l, _dev, ptr, size) \ -do { \ - struct device *__dev = _dev; \ - if (D_MASTER && D_LOCAL >= (l)) \ - dump_bytes(__dev, ptr, size); \ -} while (0 && _dev) -#define d_test(l) (D_MASTER && D_LOCAL >= (l)) diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h index 198c15f8e25..b52e44f1bd3 100644 --- a/include/linux/uwb/spec.h +++ b/include/linux/uwb/spec.h @@ -59,6 +59,11 @@ enum { UWB_NUM_ZONES = 16 }; #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) /* + * Number of MAS required before a row can be considered available. + */ +#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) + +/* * Number of streams per DRP reservation between a pair of devices. * * [ECMA-368] section 16.8.6. @@ -94,6 +99,26 @@ enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; enum { UWB_MAX_LOST_BEACONS = 3 }; /* + * mDRPBackOffWinMin + * + * The minimum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; + +/* + * mDRPBackOffWinMax + * + * The maximum number of superframes to wait before trying to reserve + * extra MAS. + * + * [ECMA-368] section 17.16 + */ +enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; + +/* * Length of a superframe in microseconds. */ #define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) @@ -200,6 +225,12 @@ enum uwb_drp_reason { UWB_DRP_REASON_MODIFIED, }; +/** Relinquish Request Reason Codes ([ECMA-368] table 113) */ +enum uwb_relinquish_req_reason { + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, +}; + /** * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) */ @@ -252,6 +283,7 @@ enum uwb_ie { UWB_APP_SPEC_PROBE_IE = 15, UWB_IDENTIFICATION_IE = 19, UWB_MASTER_KEY_ID_IE = 20, + UWB_RELINQUISH_REQUEST_IE = 21, UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ UWB_APP_SPEC_IE = 255, }; @@ -365,6 +397,27 @@ struct uwb_ie_drp_avail { DECLARE_BITMAP(bmp, UWB_NUM_MAS); } __attribute__((packed)); +/* Relinqish Request IE ([ECMA-368] section 16.8.19). 
*/ +struct uwb_relinquish_request_ie { + struct uwb_ie_hdr hdr; + __le16 relinquish_req_control; + struct uwb_dev_addr dev_addr; + struct uwb_drp_alloc allocs[]; +} __attribute__((packed)); + +static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) +{ + return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; +} + +static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, + int reason_code) +{ + u16 ctrl = le16_to_cpu(ie->relinquish_req_control); + ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); + ie->relinquish_req_control = cpu_to_le16(ctrl); +} + /** * The Vendor ID is set to an OUI that indicates the vendor of the device. * ECMA-368 [16.8.10] diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h index 36a39e34f8d..4b4fc0f4385 100644 --- a/include/linux/uwb/umc.h +++ b/include/linux/uwb/umc.h @@ -89,6 +89,8 @@ struct umc_driver { void (*remove)(struct umc_dev *); int (*suspend)(struct umc_dev *, pm_message_t state); int (*resume)(struct umc_dev *); + int (*pre_reset)(struct umc_dev *); + int (*post_reset)(struct umc_dev *); struct device_driver driver; }; diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 4669d7e72e7..5571dbe1c0a 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -293,6 +293,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ #define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ #define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ +#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */ #define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ #define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ #define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ @@ -304,6 +305,8 @@ struct v4l2_pix_format { /* two planes -- one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ #define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ +#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */ +#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ /* The following formats are not defined in the V4L2 specification */ #define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ @@ -1050,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode { #define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) #define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) -/* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */ +/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ #define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) #define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { @@ -1117,6 +1120,12 @@ enum v4l2_exposure_auto_type { #define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) #define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) +#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13) +#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14) +#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15) + +#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16) + /* * T U N I N G */ @@ -1361,24 +1370,41 @@ struct v4l2_streamparm { /* * A D V 
A N C E D D E B U G G I N G * - * NOTE: EXPERIMENTAL API + * NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS! + * FOR DEBUGGING, TESTING AND INTERNAL USE ONLY! */ /* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */ #define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ -#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */ +#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */ #define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ +#define V4L2_CHIP_MATCH_AC97 3 /* Match against anciliary AC97 chip */ + +struct v4l2_dbg_match { + __u32 type; /* Match type */ + union { /* Match this chip, meaning determined by type */ + __u32 addr; + char name[32]; + }; +} __attribute__ ((packed)); -struct v4l2_register { - __u32 match_type; /* Match type */ - __u32 match_chip; /* Match this chip, meaning determined by match_type */ +struct v4l2_dbg_register { + struct v4l2_dbg_match match; + __u32 size; /* register size in bytes */ __u64 reg; __u64 val; -}; +} __attribute__ ((packed)); -/* VIDIOC_G_CHIP_IDENT */ -struct v4l2_chip_ident { +/* VIDIOC_DBG_G_CHIP_IDENT */ +struct v4l2_dbg_chip_ident { + struct v4l2_dbg_match match; + __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ + __u32 revision; /* chip revision, chip specific */ +} __attribute__ ((packed)); + +/* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */ +struct v4l2_chip_ident_old { __u32 match_type; /* Match type */ __u32 match_chip; /* Match this chip, meaning determined by match_type */ __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ @@ -1450,14 +1476,25 @@ struct v4l2_chip_ident { #define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx) #define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd) #define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd) +#endif -/* Experimental, only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */ -#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_register) -#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_register) - -#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) +#if 1 +/* Experimental, meant for debugging, testing and internal use. + Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. + You must be root to use these ioctls. Never use these in applications! */ +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) + +/* Experimental, meant for debugging, testing and internal use. + Never use this ioctl in applications! */ +#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) +/* This is deprecated and will go away in 2.6.30 */ +#define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old) #endif + #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) +/* Reminder: when adding new ioctls please add support for them to + drivers/media/video/v4l2-compat-ioctl32.c as well! */ #ifdef __OLD_VIDIOC_ /* for compatibility, will go away some day */ diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h index c30c7bfbf39..8726ff77763 100644 --- a/include/linux/virtio_balloon.h +++ b/include/linux/virtio_balloon.h @@ -10,6 +10,9 @@ /* The feature bitmap for virtio balloon */ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ +/* Size of a PFN in the balloon interface. 
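The register-debugging ioctls now take struct v4l2_dbg_register with an embedded v4l2_dbg_match instead of the old match_type/match_chip pair. A sketch of how a root-only user-space debugging tool might read a register, assuming the driver was built with CONFIG_VIDEO_ADV_DEBUG; the device node and register offset are arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dbg_register reg;
	int fd = open("/dev/video0", O_RDWR);	/* arbitrary device node */

	if (fd < 0)
		return 1;

	memset(&reg, 0, sizeof(reg));
	reg.match.type = V4L2_CHIP_MATCH_HOST;	/* the bridge/host chip itself */
	reg.match.addr = 0;
	reg.reg = 0x100;			/* arbitrary register offset */

	if (ioctl(fd, VIDIOC_DBG_G_REGISTER, &reg) == 0)
		printf("reg 0x%llx = 0x%llx\n",
		       (unsigned long long)reg.reg,
		       (unsigned long long)reg.val);
	return 0;
}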
*/ +#define VIRTIO_BALLOON_PFN_SHIFT 12 + struct virtio_balloon_config { /* Number of pages host wants Guest to give up. */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index 19a0da0dba4..7615ffcdd55 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -7,6 +7,17 @@ /* The ID for virtio console */ #define VIRTIO_ID_CONSOLE 3 +/* Feature bits */ +#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ + +struct virtio_console_config { + /* colums of the screens */ + __u16 cols; + /* rows of the screens */ + __u16 rows; +} __attribute__((packed)); + + #ifdef __KERNEL__ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); #endif /* __KERNEL__ */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 5e33761b9b8..5cdd0aa8bde 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -20,6 +20,7 @@ #define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ struct virtio_net_config { @@ -44,4 +45,12 @@ struct virtio_net_hdr __u16 csum_start; /* Position to start checksumming from */ __u16 csum_offset; /* Offset after that to place checksum */ }; + +/* This is the version of the header to use when the MRG_RXBUF + * feature has been negotiated. */ +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __u16 num_buffers; /* Number of merged rx buffers */ +}; + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cdef3574293..cd0fd5d181a 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h @@ -53,4 +53,12 @@ /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 + +/* How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. + * x86 pagesize again. */ +#define VIRTIO_PCI_VRING_ALIGN 4096 #endif diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index c4a598fb382..71e03722fb5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -83,7 +83,7 @@ struct vring { * __u16 avail_idx; * __u16 available[num]; * - * // Padding to the next page boundary. + * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. 
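VIRTIO_BALLOON_PFN_SHIFT above pins the balloon's page-frame unit at 4 KiB regardless of the guest's PAGE_SIZE, so a driver has to translate its own page frame numbers into balloon PFNs. A sketch of that arithmetic, assuming PAGE_SHIFT >= 12; the helper name is illustrative, not taken from this hunk.

#include <linux/mm.h>
#include <linux/virtio_balloon.h>

/*
 * Illustrative: convert a guest page into the first 4 KiB-sized PFN the
 * balloon protocol expects.  A 64 KiB-page guest reports 16 balloon PFNs
 * per page, a 4 KiB-page guest exactly one.
 */
static inline u32 my_page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	return pfn << (PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT);
}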
@@ -93,19 +93,19 @@ struct vring { * }; */ static inline void vring_init(struct vring *vr, unsigned int num, void *p, - unsigned long pagesize) + unsigned long align) { vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1) - & ~(pagesize - 1)); + vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1) + & ~(align - 1)); } -static inline unsigned vring_size(unsigned int num, unsigned long pagesize) +static inline unsigned vring_size(unsigned int num, unsigned long align) { return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) - + pagesize - 1) & ~(pagesize - 1)) + + align - 1) & ~(align - 1)) + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; } @@ -115,6 +115,7 @@ struct virtio_device; struct virtqueue; struct virtqueue *vring_new_virtqueue(unsigned int num, + unsigned int vring_align, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *vq), diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 307b88577ea..506e7620a98 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -97,6 +97,10 @@ extern void unmap_kernel_range(unsigned long addr, unsigned long size); extern struct vm_struct *alloc_vm_area(size_t size); extern void free_vm_area(struct vm_struct *area); +/* for /dev/kmem */ +extern long vread(char *buf, char *addr, unsigned long count); +extern long vwrite(char *buf, char *addr, unsigned long count); + /* * Internals. Dont't use.. */ diff --git a/include/linux/wimax.h b/include/linux/wimax.h new file mode 100644 index 00000000000..c89de7f4e5b --- /dev/null +++ b/include/linux/wimax.h @@ -0,0 +1,234 @@ +/* + * Linux WiMax + * API for user space + * + * + * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
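vring_init() and vring_size() now take an explicit alignment instead of assuming the page size, and the PCI transport passes VIRTIO_PCI_VRING_ALIGN (4096). A sketch of how a transport might size and lay out a ring with the new signatures; everything other than the vring API and VIRTIO_PCI_VRING_ALIGN is illustrative.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>

static int my_transport_setup_ring(unsigned int num)
{
	struct vring vr;
	unsigned int size;
	void *queue;

	/* Bytes needed for descriptors + avail ring + (aligned) used ring. */
	size = vring_size(num, VIRTIO_PCI_VRING_ALIGN);

	queue = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(size));
	if (!queue)
		return -ENOMEM;

	/* Lay the three parts out in the buffer using the same alignment. */
	vring_init(&vr, num, queue, VIRTIO_PCI_VRING_ALIGN);

	/* vr.desc, vr.avail and vr.used now point into 'queue' ... */
	return 0;
}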
+ * + * + * Intel Corporation <linux-wimax@intel.com> + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * - Initial implementation + * + * + * This file declares the user/kernel protocol that is spoken over + * Generic Netlink, as well as any type declaration that is to be used + * by kernel and user space. + * + * It is intended for user space to clone it verbatim to use it as a + * primary reference for definitions. + * + * Stuff intended for kernel usage as well as full protocol and stack + * documentation is rooted in include/net/wimax.h. + */ + +#ifndef __LINUX__WIMAX_H__ +#define __LINUX__WIMAX_H__ + +#include <linux/types.h> + +enum { + /** + * Version of the interface (unsigned decimal, MMm, max 25.5) + * M - Major: change if removing or modifying an existing call. + * m - minor: change when adding a new call + */ + WIMAX_GNL_VERSION = 00, + /* Generic NetLink attributes */ + WIMAX_GNL_ATTR_INVALID = 0x00, + WIMAX_GNL_ATTR_MAX = 10, +}; + + +/* + * Generic NetLink operations + * + * Most of these map to an API call; _OP_ stands for operation, _RP_ + * for reply and _RE_ for report (aka: signal). + */ +enum { + WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */ + WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */ + WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */ + WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */ + WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */ +}; + + +/* Message from user / to user */ +enum { + WIMAX_GNL_MSG_IFIDX = 1, + WIMAX_GNL_MSG_PIPE_NAME, + WIMAX_GNL_MSG_DATA, +}; + + +/* + * wimax_rfkill() + * + * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's + * switch state (DISABLED/ENABLED). + */ +enum wimax_rf_state { + WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */ + WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */ + WIMAX_RF_QUERY = 2, +}; + +/* Attributes */ +enum { + WIMAX_GNL_RFKILL_IFIDX = 1, + WIMAX_GNL_RFKILL_STATE, +}; + + +/* Attributes for wimax_reset() */ +enum { + WIMAX_GNL_RESET_IFIDX = 1, +}; + + +/* + * Attributes for the Report State Change + * + * For now we just have the old and new states; new attributes might + * be added later on. + */ +enum { + WIMAX_GNL_STCH_IFIDX = 1, + WIMAX_GNL_STCH_STATE_OLD, + WIMAX_GNL_STCH_STATE_NEW, +}; + + +/** + * enum wimax_st - The different states of a WiMAX device + * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed, + * but still wimax_dev_add() hasn't been called. There is no state. + * + * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and + * networking stacks, but it is not initialized (normally that is + * done with 'ifconfig DEV up' [or equivalent], which can upload + * firmware and enable communications with the device). + * In this state, the device is powered down and using as less + * power as possible. + * This state is the default after a call to wimax_dev_add(). It + * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED + * or %WIMAX_ST_RADIO_OFF in _probe() after the call to + * wimax_dev_add(). + * It is recommended that the driver leaves this state when + * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV + * down'. + * + * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API + * operations are allowed to proceed except the ones needed to + * complete the device clean up process. + * + * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device + * is setup, but the device still requires some configuration + * before being operational. 
+ * Some WiMAX API calls might work. + * + * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (wether + * by hardware or software switches). + * It is recommended to always leave the device in this state + * after initialization. + * + * @WIMAX_ST_READY: The device is fully up and radio is on. + * + * @WIMAX_ST_SCANNING: [optional] The device has been instructed to + * scan. In this state, the device cannot be actively connected to + * a network. + * + * @WIMAX_ST_CONNECTING: The device is connecting to a network. This + * state exists because in some devices, the connect process can + * include a number of negotiations between user space, kernel + * space and the device. User space needs to know what the device + * is doing. If the connect sequence in a device is atomic and + * fast, the device can transition directly to CONNECTED + * + * @WIMAX_ST_CONNECTED: The device is connected to a network. + * + * @__WIMAX_ST_INVALID: This is an invalid state used to mark the + * maximum numeric value of states. + * + * Description: + * + * Transitions from one state to another one are atomic and can only + * be caused in kernel space with wimax_state_change(). To read the + * state, use wimax_state_get(). + * + * States starting with __ are internal and shall not be used or + * referred to by drivers or userspace. They look ugly, but that's the + * point -- if any use is made non-internal to the stack, it is easier + * to catch on review. + * + * All API operations [with well defined exceptions] will take the + * device mutex before starting and then check the state. If the state + * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or + * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with + * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN. + * + * The order of the definitions is important, so we can do numerical + * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready + * to operate). + */ +/* + * The allowed state transitions are described in the table below + * (states in rows can go to states in columns where there is an X): + * + * UNINI RADIO READY SCAN CONNEC CONNEC + * NULL DOWN QUIESCING TIALIZED OFF NING TING TED + * NULL - x + * DOWN - x x x + * QUIESCING x - + * UNINITIALIZED x - x + * RADIO_OFF x - x + * READY x x - x x x + * SCANNING x x x - x x + * CONNECTING x x x x - x + * CONNECTED x x x - + * + * This table not available in kernel-doc because the formatting messes it up. + */ + enum wimax_st { + __WIMAX_ST_NULL = 0, + WIMAX_ST_DOWN, + __WIMAX_ST_QUIESCING, + WIMAX_ST_UNINITIALIZED, + WIMAX_ST_RADIO_OFF, + WIMAX_ST_READY, + WIMAX_ST_SCANNING, + WIMAX_ST_CONNECTING, + WIMAX_ST_CONNECTED, + __WIMAX_ST_INVALID /* Always keep last */ +}; + + +#endif /* #ifndef __LINUX__WIMAX_H__ */ diff --git a/include/linux/wimax/Kbuild b/include/linux/wimax/Kbuild new file mode 100644 index 00000000000..3cb4f269bb0 --- /dev/null +++ b/include/linux/wimax/Kbuild @@ -0,0 +1 @@ +header-y += i2400m.h diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h new file mode 100644 index 00000000000..ba0c49399a8 --- /dev/null +++ b/include/linux/wimax/debug.h @@ -0,0 +1,453 @@ +/* + * Linux WiMAX + * Collection of tools to manage debug operations. + * + * + * Copyright (C) 2005-2007 Intel Corporation + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. 
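Because enum wimax_st is defined in increasing order of readiness (see the note above about numerical comparisons), callers can gate operations with a simple less-than test. A tiny sketch; wimax_state_get() and struct wimax_dev live in include/net/wimax.h rather than this header, so the accessor is assumed here as described in the kernel-doc.

#include <linux/errno.h>
#include <linux/wimax.h>

/* Illustrative: anything below WIMAX_ST_RADIO_OFF is not ready to operate. */
static int my_op_allowed(struct wimax_dev *wimax_dev)
{
	enum wimax_st state = wimax_state_get(wimax_dev);	/* assumed accessor */

	if (state < WIMAX_ST_RADIO_OFF)
		return -ENOMEDIUM;	/* down, uninitialized or quiescing */
	return 0;
}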
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * + * Don't #include this file directly, read on! + * + * + * EXECUTING DEBUGGING ACTIONS OR NOT + * + * The main thing this framework provides is decission power to take a + * debug action (like printing a message) if the current debug level + * allows it. + * + * The decission power is at two levels: at compile-time (what does + * not make it is compiled out) and at run-time. The run-time + * selection is done per-submodule (as they are declared by the user + * of the framework). + * + * A call to d_test(L) (L being the target debug level) returns true + * if the action should be taken because the current debug levels + * allow it (both compile and run time). + * + * It follows that a call to d_test() that can be determined to be + * always false at compile time will get the code depending on it + * compiled out by optimization. + * + * + * DEBUG LEVELS + * + * It is up to the caller to define how much a debugging level is. + * + * Convention sets 0 as "no debug" (so an action marked as debug level 0 + * will always be taken). The increasing debug levels are used for + * increased verbosity. + * + * + * USAGE + * + * Group the code in modules and submodules inside each module [which + * in most cases maps to Linux modules and .c files that compose + * those]. + * + * + * For each module, there is: + * + * - a MODULENAME (single word, legal C identifier) + * + * - a debug-levels.h header file that declares the list of + * submodules and that is included by all .c files that use + * the debugging tools. The file name can be anything. + * + * - some (optional) .c code to manipulate the runtime debug levels + * through debugfs. + * + * The debug-levels.h file would look like: + * + * #ifndef __debug_levels__h__ + * #define __debug_levels__h__ + * + * #define D_MODULENAME modulename + * #define D_MASTER 10 + * + * #include <linux/wimax/debug.h> + * + * enum d_module { + * D_SUBMODULE_DECLARE(submodule_1), + * D_SUBMODULE_DECLARE(submodule_2), + * ... + * D_SUBMODULE_DECLARE(submodule_N) + * }; + * + * #endif + * + * D_MASTER is the maximum compile-time debug level; any debug actions + * above this will be out. D_MODULENAME is the module name (legal C + * identifier), which has to be unique for each module (to avoid + * namespace collisions during linkage). Note those #defines need to + * be done before #including debug.h + * + * We declare N different submodules whose debug level can be + * independently controlled during runtime. + * + * In a .c file of the module (and only in one of them), define the + * following code: + * + * struct d_level D_LEVEL[] = { + * D_SUBMODULE_DEFINE(submodule_1), + * D_SUBMODULE_DEFINE(submodule_2), + * ... + * D_SUBMODULE_DEFINE(submodule_N), + * }; + * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); + * + * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used + * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros + * #defined also in this file. 
+ * + * To manipulate from user space the levels, create a debugfs dentry + * and then register each submodule with: + * + * result = d_level_register_debugfs("PREFIX_", submodule_X, parent); + * if (result < 0) + * goto error; + * + * Where PREFIX_ is a name of your chosing. This will create debugfs + * file with a single numeric value that can be use to tweak it. To + * remove the entires, just use debugfs_remove_recursive() on 'parent'. + * + * NOTE: remember that even if this will show attached to some + * particular instance of a device, the settings are *global*. + * + * + * On each submodule (for example, .c files), the debug infrastructure + * should be included like this: + * + * #define D_SUBMODULE submodule_x // matches one in debug-levels.h + * #include "debug-levels.h" + * + * after #including all your include files. + * + * + * Now you can use the d_*() macros below [d_test(), d_fnstart(), + * d_fnend(), d_printf(), d_dump()]. + * + * If their debug level is greater than D_MASTER, they will be + * compiled out. + * + * If their debug level is lower or equal than D_MASTER but greater + * than the current debug level of their submodule, they'll be + * ignored. + * + * Otherwise, the action will be performed. + */ +#ifndef __debug__h__ +#define __debug__h__ + +#include <linux/types.h> +#include <linux/device.h> + + +/* Backend stuff */ + +/* + * Debug backend: generate a message header from a 'struct device' + * + * @head: buffer where to place the header + * @head_size: length of @head + * @dev: pointer to device used to generate a header from. If NULL, + * an empty ("") header is generated. + */ +static inline +void __d_head(char *head, size_t head_size, + struct device *dev) +{ + if (dev == NULL) + head[0] = 0; + else if ((unsigned long)dev < 4096) { + printk(KERN_ERR "E: Corrupt dev %p\n", dev); + WARN_ON(1); + } else + snprintf(head, head_size, "%s %s: ", + dev_driver_string(dev), dev->bus_id); +} + + +/* + * Debug backend: log some message if debugging is enabled + * + * @l: intended debug level + * @tag: tag to prefix the message with + * @dev: 'struct device' associated to this message + * @f: printf-like format and arguments + * + * Note this is optimized out if it doesn't pass the compile-time + * check; however, it is *always* compiled. This is useful to make + * sure the printf-like formats and variables are always checked and + * they don't get bit rot if you have all the debugging disabled. + */ +#define _d_printf(l, tag, dev, f, a...) \ +do { \ + char head[64]; \ + if (!d_test(l)) \ + break; \ + __d_head(head, sizeof(head), dev); \ + printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \ +} while (0) + + +/* + * CPP sintatic sugar to generate A_B like symbol names when one of + * the arguments is a a preprocessor #define. + */ +#define __D_PASTE__(varname, modulename) varname##_##modulename +#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename)) +#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name)) + + +/* + * Store a submodule's runtime debug level and name + */ +struct d_level { + u8 level; + const char *name; +}; + + +/* + * List of available submodules and their debug levels + * + * We call them d_level_MODULENAME and d_level_size_MODULENAME; the + * macros D_LEVEL and D_LEVEL_SIZE contain the name already for + * convenience. + * + * This array and the size are defined on some .c file that is part of + * the current module. 
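[Editor's note] A minimal sketch of the debugfs hookup just described, continuing the hypothetical foodrv example: the foodrv_* names and the "foodrv_dl_" prefix are invented, debugfs_create_dir() is simply one way a driver might obtain the parent dentry, and error handling is pared down to the pattern shown in the comment above.

        #include <linux/debugfs.h>
        #include "debug-levels.h"       /* hypothetical foodrv header from the earlier note */

        static struct dentry *foodrv_debugfs_dentry;

        static int foodrv_debug_init(void)
        {
                int result;

                foodrv_debugfs_dentry = debugfs_create_dir("foodrv", NULL);
                if (foodrv_debugfs_dentry == NULL)
                        return -ENOMEM;
                result = d_level_register_debugfs("foodrv_dl_", rx,
                                                  foodrv_debugfs_dentry);
                if (result < 0)
                        goto error;
                result = d_level_register_debugfs("foodrv_dl_", tx,
                                                  foodrv_debugfs_dentry);
                if (result < 0)
                        goto error;
                return 0;
        error:
                debugfs_remove_recursive(foodrv_debugfs_dentry);
                return result;
        }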
+ */ +#define D_LEVEL __D_PASTE(d_level, D_MODULENAME) +#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME) + +extern struct d_level D_LEVEL[]; +extern size_t D_LEVEL_SIZE; + + +/* + * Frontend stuff + * + * + * Stuff you need to declare prior to using the actual "debug" actions + * (defined below). + */ + +#ifndef D_MODULENAME +#error D_MODULENAME is not defined in your debug-levels.h file +/** + * D_MODULE - Name of the current module + * + * #define in your module's debug-levels.h, making sure it is + * unique. This has to be a legal C identifier. + */ +#define D_MODULENAME undefined_modulename +#endif + + +#ifndef D_MASTER +#warning D_MASTER not defined, but debug.h included! [see docs] +/** + * D_MASTER - Compile time maximum debug level + * + * #define in your debug-levels.h file to the maximum debug level the + * runtime code will be allowed to have. This allows you to provide a + * main knob. + * + * Anything above that level will be optimized out of the compile. + * + * Defaults to zero (no debug code compiled in). + * + * Maximum one definition per module (at the debug-levels.h file). + */ +#define D_MASTER 0 +#endif + +#ifndef D_SUBMODULE +#error D_SUBMODULE not defined, but debug.h included! [see docs] +/** + * D_SUBMODULE - Name of the current submodule + * + * #define in your submodule .c file before #including debug-levels.h + * to the name of the current submodule as previously declared and + * defined with D_SUBMODULE_DECLARE() (in your module's + * debug-levels.h) and D_SUBMODULE_DEFINE(). + * + * This is used to provide runtime-control over the debug levels. + * + * Maximum one per .c file! Can be shared among different .c files + * (meaning they belong to the same submodule categorization). + */ +#define D_SUBMODULE undefined_module +#endif + + +/** + * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control + * + * @_name: name of the submodule, restricted to the chars that make up a + * valid C identifier ([a-zA-Z0-9_]). + * + * Declare in the module's debug-levels.h header file as: + * + * enum d_module { + * D_SUBMODULE_DECLARE(submodule_1), + * D_SUBMODULE_DECLARE(submodule_2), + * D_SUBMODULE_DECLARE(submodule_3), + * }; + * + * Some corresponding .c file needs to have a matching + * D_SUBMODULE_DEFINE(). + */ +#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name + + +/** + * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control + * + * @_name: name of the submodule, restricted to the chars that make up a + * valid C identifier ([a-zA-Z0-9_]). + * + * Use once per module (in some .c file) as: + * + * static + * struct d_level d_level_SUBMODULENAME[] = { + * D_SUBMODULE_DEFINE(submodule_1), + * D_SUBMODULE_DEFINE(submodule_2), + * D_SUBMODULE_DEFINE(submodule_3), + * }; + * size_t d_level_size_SUBDMODULENAME = ARRAY_SIZE(d_level_SUBDMODULENAME); + * + * Matching D_SUBMODULE_DECLARE()s have to be present in a + * debug-levels.h header file. + */ +#define D_SUBMODULE_DEFINE(_name) \ +[__D_SUBMODULE_##_name] = { \ + .level = 0, \ + .name = #_name \ +} + + + +/* The actual "debug" operations */ + + +/** + * d_test - Returns true if debugging should be enabled + * + * @l: intended debug level (unsigned) + * + * If the master debug switch is enabled and the current settings are + * higher or equal to the requested level, then debugging + * output/actions should be enabled. 
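[Editor's note] As an expansion check (illustrative only, assuming D_MODULENAME is foodrv and a single submodule declared as D_SUBMODULE_DECLARE(rx)), the pasting and define macros above boil down to roughly this plain C at the definition site; the foodrv names are invented and ARRAY_SIZE comes from <linux/kernel.h>:

        #include <linux/kernel.h>

        enum d_module {
                __D_SUBMODULE_rx,                       /* from D_SUBMODULE_DECLARE(rx) */
        };

        struct d_level d_level_foodrv[] = {             /* D_LEVEL */
                [__D_SUBMODULE_rx] = {                  /* D_SUBMODULE_DEFINE(rx) */
                        .level = 0,
                        .name = "rx",
                },
        };
        size_t d_level_size_foodrv = ARRAY_SIZE(d_level_foodrv);   /* D_LEVEL_SIZE */

Because the array uses designated initializers indexed by the enum, the order of D_SUBMODULE_DEFINE() entries does not have to match the order of the declarations.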
+ * + * NOTE: + * + * This needs to be coded so that it can be evaluated in compile + * time; this is why the ugly BUG_ON() is placed in there, so the + * D_MASTER evaluation compiles all out if it is compile-time false. + */ +#define d_test(l) \ +({ \ + unsigned __l = l; /* type enforcer */ \ + (D_MASTER) >= __l \ + && ({ \ + BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\ + D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \ + }); \ +}) + + +/** + * d_fnstart - log message at function start if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a) + + +/** + * d_fnend - log message at function end if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a) + + +/** + * d_printf - log message if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a) + + +/** + * d_dump - log buffer hex dump if debugging enabled + * + * @l: intended debug level + * @_dev: 'struct device' pointer, NULL if none (for context) + * @f: printf-like format and arguments + */ +#define d_dump(l, dev, ptr, size) \ +do { \ + char head[64]; \ + if (!d_test(l)) \ + break; \ + __d_head(head, sizeof(head), dev); \ + print_hex_dump(KERN_ERR, head, 0, 16, 1, \ + ((void *) ptr), (size), 0); \ +} while (0) + + +/** + * Export a submodule's debug level over debugfs as PREFIXSUBMODULE + * + * @prefix: string to prefix the name with + * @submodule: name of submodule (not a string, just the name) + * @dentry: debugfs parent dentry + * + * Returns: 0 if ok, < 0 errno on error. + * + * For removing, just use debugfs_remove_recursive() on the parent. + */ +#define d_level_register_debugfs(prefix, name, parent) \ +({ \ + int rc; \ + struct dentry *fd; \ + struct dentry *verify_parent_type = parent; \ + fd = debugfs_create_u8( \ + prefix #name, 0600, verify_parent_type, \ + &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \ + rc = PTR_ERR(fd); \ + if (IS_ERR(fd) && rc != -ENODEV) \ + printk(KERN_ERR "%s: Can't create debugfs entry %s: " \ + "%d\n", __func__, prefix #name, rc); \ + else \ + rc = 0; \ + rc; \ +}) + + +#endif /* #ifndef __debug__h__ */ diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h new file mode 100644 index 00000000000..74198f5bb4d --- /dev/null +++ b/include/linux/wimax/i2400m.h @@ -0,0 +1,512 @@ +/* + * Intel Wireless WiMax Connection 2400m + * Host-Device protocol interface definitions + * + * + * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
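[Editor's note] A short usage sketch of the action macros above in a hypothetical foodrv receive path; the function and variable names are invented, and it assumes D_SUBMODULE was set to a submodule declared in the foodrv debug-levels.h from the earlier note. All of these build on d_test(), so anything above D_MASTER costs nothing at run time.

        #define D_SUBMODULE rx          /* must match a D_SUBMODULE_DECLARE() */
        #include "debug-levels.h"       /* hypothetical foodrv header */

        static void foodrv_rx(struct device *dev, const void *buf, size_t size)
        {
                d_fnstart(3, dev, "(dev %p buf %p size %zu)\n", dev, buf, size);
                d_printf(1, dev, "RX: %zu bytes\n", size);
                d_dump(5, dev, buf, size);      /* hex dump only at high verbosity */
                /* ... actual frame processing would go here ... */
                d_fnend(3, dev, "(dev %p buf %p size %zu) = void\n", dev, buf, size);
        }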
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Intel Corporation <linux-wimax@intel.com> + * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> + * - Initial implementation + * + * + * This header defines the data structures and constants used to + * communicate with the device. + * + * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL + * + * The firmware upload protocol is quite simple and only requires a + * handful of commands. See drivers/net/wimax/i2400m/fw.c for more + * details. + * + * The BCF data structure is for the firmware file header. + * + * + * THE DATA / CONTROL PROTOCOL + * + * This is the normal protocol spoken with the device once the + * firmware is uploaded. It transports data payloads and control + * messages back and forth. + * + * It consists 'messages' that pack one or more payloads each. The + * format is described in detail in drivers/net/wimax/i2400m/rx.c and + * tx.c. + * + * + * THE L3L4 PROTOCOL + * + * The term L3L4 refers to Layer 3 (the device), Layer 4 (the + * driver/host software). + * + * This is the control protocol used by the host to control the i2400m + * device (scan, connect, disconnect...). This is sent to / received + * as control frames. These frames consist of a header and zero or + * more TLVs with information. We call each control frame a "message". + * + * Each message is composed of: + * + * HEADER + * [TLV0 + PAYLOAD0] + * [TLV1 + PAYLOAD1] + * [...] + * [TLVN + PAYLOADN] + * + * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are + * defined by a TLV structure (Type Length Value) which is a 'header' + * (struct i2400m_tlv_hdr) and then the payload. + * + * All integers are represented as Little Endian. + * + * - REQUESTS AND EVENTS + * + * The requests can be clasified as follows: + * + * COMMAND: implies a request from the host to the device requesting + * an action being performed. The device will reply with a + * message (with the same type as the command), status and + * no (TLV) payload. Execution of a command might cause + * events (of different type) to be sent later on as + * device's state changes. + * + * GET/SET: similar to COMMAND, but will not cause other + * EVENTs. The reply, in the case of GET, will contain + * TLVs with the requested information. + * + * EVENT: asynchronous messages sent from the device, maybe as a + * consequence of previous COMMANDs but disassociated from + * them. 
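[Editor's note] To make the HEADER + TLV framing concrete, here is a sketch of filling in a bare GET request (no TLV payload). It uses struct i2400m_l3l4_hdr and the I2400M_MT_* / I2400M_L3L4_VERSION constants defined further down in this header; whether the host fills @version on requests, and how the buffer reaches the device, are bus/driver details assumed away here, and example_fill_get_device_info is an invented name.

        /* Illustrative only: an L3L4 GET with an empty TLV list */
        static void example_fill_get_device_info(struct i2400m_l3l4_hdr *hdr)
        {
                hdr->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO);
                hdr->length = cpu_to_le16(0);           /* no TLV payload follows */
                hdr->version = cpu_to_le16(I2400M_L3L4_VERSION);
                hdr->resv1 = 0;
                hdr->status = 0;
                hdr->resv2 = 0;
        }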
+ * + * Only one request might be pending at the same time (ie: don't + * parallelize nor post another GET request before the previous + * COMMAND has been acknowledged with it's corresponding reply by the + * device). + * + * The different requests and their formats are described below: + * + * I2400M_MT_* Message types + * I2400M_MS_* Message status (for replies, events) + * i2400m_tlv_* TLVs + * + * data types are named 'struct i2400m_msg_OPNAME', OPNAME matching the + * operation. + */ + +#ifndef __LINUX__WIMAX__I2400M_H__ +#define __LINUX__WIMAX__I2400M_H__ + +#include <linux/types.h> + + +/* + * Host Device Interface (HDI) common to all busses + */ + +/* Boot-mode (firmware upload mode) commands */ + +/* Header for the firmware file */ +struct i2400m_bcf_hdr { + __le32 module_type; + __le32 header_len; + __le32 header_version; + __le32 module_id; + __le32 module_vendor; + __le32 date; /* BCD YYYMMDD */ + __le32 size; + __le32 key_size; /* in dwords */ + __le32 modulus_size; /* in dwords */ + __le32 exponent_size; /* in dwords */ + __u8 reserved[88]; +} __attribute__ ((packed)); + +/* Boot mode opcodes */ +enum i2400m_brh_opcode { + I2400M_BRH_READ = 1, + I2400M_BRH_WRITE = 2, + I2400M_BRH_JUMP = 3, + I2400M_BRH_SIGNED_JUMP = 8, + I2400M_BRH_HASH_PAYLOAD_ONLY = 9, +}; + +/* Boot mode command masks and stuff */ +enum i2400m_brh { + I2400M_BRH_SIGNATURE = 0xcbbc0000, + I2400M_BRH_SIGNATURE_MASK = 0xffff0000, + I2400M_BRH_SIGNATURE_SHIFT = 16, + I2400M_BRH_OPCODE_MASK = 0x0000000f, + I2400M_BRH_RESPONSE_MASK = 0x000000f0, + I2400M_BRH_RESPONSE_SHIFT = 4, + I2400M_BRH_DIRECT_ACCESS = 0x00000400, + I2400M_BRH_RESPONSE_REQUIRED = 0x00000200, + I2400M_BRH_USE_CHECKSUM = 0x00000100, +}; + + +/* Constants for bcf->module_id */ +enum i2400m_bcf_mod_id { + /* Firmware file carries its own pokes -- pokes are a set of + * magical values that have to be written in certain memory + * addresses to get the device up and ready for firmware + * download when it is in non-signed boot mode. */ + I2400M_BCF_MOD_ID_POKES = 0x000000001, +}; + + +/** + * i2400m_bootrom_header - Header for a boot-mode command + * + * @cmd: the above command descriptor + * @target_addr: where on the device memory should the action be performed. + * @data_size: for read/write, amount of data to be read/written + * @block_checksum: checksum value (if applicable) + * @payload: the beginning of data attached to this header + */ +struct i2400m_bootrom_header { + __le32 command; /* Compose with enum i2400_brh */ + __le32 target_addr; + __le32 data_size; + __le32 block_checksum; + char payload[0]; +} __attribute__ ((packed)); + + +/* + * Data / control protocol + */ + +/* Packet types for the host-device interface */ +enum i2400m_pt { + I2400M_PT_DATA = 0, + I2400M_PT_CTRL, + I2400M_PT_TRACE, /* For device debug */ + I2400M_PT_RESET_WARM, /* device reset */ + I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */ + I2400M_PT_ILLEGAL +}; + + +/* + * Payload for a data packet + * + * This is prefixed to each and every outgoing DATA type. + */ +struct i2400m_pl_data_hdr { + __le32 reserved; +} __attribute__((packed)); + + +/* Misc constants */ +enum { + I2400M_PL_PAD = 16, /* Payload data size alignment */ + I2400M_PL_SIZE_MAX = 0x3EFF, + I2400M_MAX_PLS_IN_MSG = 60, + /* protocol barkers: sync sequences; for notifications they + * are sent in groups of four. 
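[Editor's note] The boot-mode command word of struct i2400m_bootrom_header is composed from the I2400M_BRH_* values above. The sketch below only restates what the mask/shift names imply (signature in the top 16 bits, opcode in the low nibble, plus the flag bits); the canonical composition lives in drivers/net/wimax/i2400m/fw.c, and example_brh_command is an invented helper.

        static __le32 example_brh_command(enum i2400m_brh_opcode opcode,
                                          bool need_response, bool use_checksum)
        {
                u32 cmd = I2400M_BRH_SIGNATURE;         /* top 16 bits: signature */

                cmd |= opcode & I2400M_BRH_OPCODE_MASK;
                if (need_response)
                        cmd |= I2400M_BRH_RESPONSE_REQUIRED;
                if (use_checksum)
                        cmd |= I2400M_BRH_USE_CHECKSUM;
                return cpu_to_le32(cmd);
        }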
*/ + I2400M_H2D_PREVIEW_BARKER = 0xcafe900d, + I2400M_COLD_RESET_BARKER = 0xc01dc01d, + I2400M_WARM_RESET_BARKER = 0x50f750f7, + I2400M_NBOOT_BARKER = 0xdeadbeef, + I2400M_SBOOT_BARKER = 0x0ff1c1a1, + I2400M_ACK_BARKER = 0xfeedbabe, + I2400M_D2H_MSG_BARKER = 0xbeefbabe, +}; + + +/* + * Hardware payload descriptor + * + * Bitfields encoded in a struct to enforce typing semantics. + * + * Look in rx.c and tx.c for a full description of the format. + */ +struct i2400m_pld { + __le32 val; +} __attribute__ ((packed)); + +#define I2400M_PLD_SIZE_MASK 0x00003fff +#define I2400M_PLD_TYPE_SHIFT 16 +#define I2400M_PLD_TYPE_MASK 0x000f0000 + +/* + * Header for a TX message or RX message + * + * @barker: preamble + * @size: used for management of the FIFO queue buffer; before + * sending, this is converted to be a real preamble. This + * indicates the real size of the TX message that starts at this + * point. If the highest bit is set, then this message is to be + * skipped. + * @sequence: sequence number of this message + * @offset: offset where the message itself starts -- see the comments + * in the file header about message header and payload descriptor + * alignment. + * @num_pls: number of payloads in this message + * @padding: amount of padding bytes at the end of the message to make + * it be of block-size aligned + * + * Look in rx.c and tx.c for a full description of the format. + */ +struct i2400m_msg_hdr { + union { + __le32 barker; + __u32 size; /* same size type as barker!! */ + }; + union { + __le32 sequence; + __u32 offset; /* same size type as barker!! */ + }; + __le16 num_pls; + __le16 rsv1; + __le16 padding; + __le16 rsv2; + struct i2400m_pld pld[0]; +} __attribute__ ((packed)); + + + +/* + * L3/L4 control protocol + */ + +enum { + /* Interface version */ + I2400M_L3L4_VERSION = 0x0100, +}; + +/* Message types */ +enum i2400m_mt { + I2400M_MT_RESERVED = 0x0000, + I2400M_MT_INVALID = 0xffff, + I2400M_MT_REPORT_MASK = 0x8000, + + I2400M_MT_GET_SCAN_RESULT = 0x4202, + I2400M_MT_SET_SCAN_PARAM = 0x4402, + I2400M_MT_CMD_RF_CONTROL = 0x4602, + I2400M_MT_CMD_SCAN = 0x4603, + I2400M_MT_CMD_CONNECT = 0x4604, + I2400M_MT_CMD_DISCONNECT = 0x4605, + I2400M_MT_CMD_EXIT_IDLE = 0x4606, + I2400M_MT_GET_LM_VERSION = 0x5201, + I2400M_MT_GET_DEVICE_INFO = 0x5202, + I2400M_MT_GET_LINK_STATUS = 0x5203, + I2400M_MT_GET_STATISTICS = 0x5204, + I2400M_MT_GET_STATE = 0x5205, + I2400M_MT_GET_MEDIA_STATUS = 0x5206, + I2400M_MT_SET_INIT_CONFIG = 0x5404, + I2400M_MT_CMD_INIT = 0x5601, + I2400M_MT_CMD_TERMINATE = 0x5602, + I2400M_MT_CMD_MODE_OF_OP = 0x5603, + I2400M_MT_CMD_RESET_DEVICE = 0x5604, + I2400M_MT_CMD_MONITOR_CONTROL = 0x5605, + I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606, + I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201, + I2400M_MT_SET_EAP_SUCCESS = 0x6402, + I2400M_MT_SET_EAP_FAIL = 0x6403, + I2400M_MT_SET_EAP_KEY = 0x6404, + I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602, + I2400M_MT_REPORT_SCAN_RESULT = 0xc002, + I2400M_MT_REPORT_STATE = 0xd002, + I2400M_MT_REPORT_POWERSAVE_READY = 0xd005, + I2400M_MT_REPORT_EAP_REQUEST = 0xe002, + I2400M_MT_REPORT_EAP_RESTART = 0xe003, + I2400M_MT_REPORT_ALT_ACCEPT = 0xe004, + I2400M_MT_REPORT_KEY_REQUEST = 0xe005, +}; + + +/* + * Message Ack Status codes + * + * When a message is replied-to, this status is reported. 
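[Editor's note] The payload descriptor packs a 14-bit size and a 4-bit type into one little-endian word. The two helpers below just restate what the I2400M_PLD_* masks above imply; the real accessors live with the TX/RX engine in drivers/net/wimax/i2400m/, and the example_* names are invented.

        static inline size_t example_pld_size(const struct i2400m_pld *pld)
        {
                return le32_to_cpu(pld->val) & I2400M_PLD_SIZE_MASK;
        }

        static inline enum i2400m_pt example_pld_type(const struct i2400m_pld *pld)
        {
                return (le32_to_cpu(pld->val) & I2400M_PLD_TYPE_MASK)
                        >> I2400M_PLD_TYPE_SHIFT;
        }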
+ */ +enum i2400m_ms { + I2400M_MS_DONE_OK = 0, + I2400M_MS_DONE_IN_PROGRESS = 1, + I2400M_MS_INVALID_OP = 2, + I2400M_MS_BAD_STATE = 3, + I2400M_MS_ILLEGAL_VALUE = 4, + I2400M_MS_MISSING_PARAMS = 5, + I2400M_MS_VERSION_ERROR = 6, + I2400M_MS_ACCESSIBILITY_ERROR = 7, + I2400M_MS_BUSY = 8, + I2400M_MS_CORRUPTED_TLV = 9, + I2400M_MS_UNINITIALIZED = 10, + I2400M_MS_UNKNOWN_ERROR = 11, + I2400M_MS_PRODUCTION_ERROR = 12, + I2400M_MS_NO_RF = 13, + I2400M_MS_NOT_READY_FOR_POWERSAVE = 14, + I2400M_MS_THERMAL_CRITICAL = 15, + I2400M_MS_MAX +}; + + +/** + * i2400m_tlv - enumeration of the different types of TLVs + * + * TLVs stand for type-length-value and are the header for a payload + * composed of almost anything. Each payload has a type assigned + * and a length. + */ +enum i2400m_tlv { + I2400M_TLV_L4_MESSAGE_VERSIONS = 129, + I2400M_TLV_SYSTEM_STATE = 141, + I2400M_TLV_MEDIA_STATUS = 161, + I2400M_TLV_RF_OPERATION = 162, + I2400M_TLV_RF_STATUS = 163, + I2400M_TLV_DEVICE_RESET_TYPE = 132, + I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601, +}; + + +struct i2400m_tlv_hdr { + __le16 type; + __le16 length; /* payload's */ + __u8 pl[0]; +} __attribute__((packed)); + + +struct i2400m_l3l4_hdr { + __le16 type; + __le16 length; /* payload's */ + __le16 version; + __le16 resv1; + __le16 status; + __le16 resv2; + struct i2400m_tlv_hdr pl[0]; +} __attribute__((packed)); + + +/** + * i2400m_system_state - different states of the device + */ +enum i2400m_system_state { + I2400M_SS_UNINITIALIZED = 1, + I2400M_SS_INIT, + I2400M_SS_READY, + I2400M_SS_SCAN, + I2400M_SS_STANDBY, + I2400M_SS_CONNECTING, + I2400M_SS_WIMAX_CONNECTED, + I2400M_SS_DATA_PATH_CONNECTED, + I2400M_SS_IDLE, + I2400M_SS_DISCONNECTING, + I2400M_SS_OUT_OF_ZONE, + I2400M_SS_SLEEPACTIVE, + I2400M_SS_PRODUCTION, + I2400M_SS_CONFIG, + I2400M_SS_RF_OFF, + I2400M_SS_RF_SHUTDOWN, + I2400M_SS_DEVICE_DISCONNECT, + I2400M_SS_MAX, +}; + + +/** + * i2400m_tlv_system_state - report on the state of the system + * + * @state: see enum i2400m_system_state + */ +struct i2400m_tlv_system_state { + struct i2400m_tlv_hdr hdr; + __le32 state; +} __attribute__((packed)); + + +struct i2400m_tlv_l4_message_versions { + struct i2400m_tlv_hdr hdr; + __le16 major; + __le16 minor; + __le16 branch; + __le16 reserved; +} __attribute__((packed)); + + +struct i2400m_tlv_detailed_device_info { + struct i2400m_tlv_hdr hdr; + __u8 reserved1[400]; + __u8 mac_address[6]; + __u8 reserved2[2]; +} __attribute__((packed)); + + +enum i2400m_rf_switch_status { + I2400M_RF_SWITCH_ON = 1, + I2400M_RF_SWITCH_OFF = 2, +}; + +struct i2400m_tlv_rf_switches_status { + struct i2400m_tlv_hdr hdr; + __u8 sw_rf_switch; /* 1 ON, 2 OFF */ + __u8 hw_rf_switch; /* 1 ON, 2 OFF */ + __u8 reserved[2]; +} __attribute__((packed)); + + +enum { + i2400m_rf_operation_on = 1, + i2400m_rf_operation_off = 2 +}; + +struct i2400m_tlv_rf_operation { + struct i2400m_tlv_hdr hdr; + __le32 status; /* 1 ON, 2 OFF */ +} __attribute__((packed)); + + +enum i2400m_tlv_reset_type { + I2400M_RESET_TYPE_COLD = 1, + I2400M_RESET_TYPE_WARM +}; + +struct i2400m_tlv_device_reset_type { + struct i2400m_tlv_hdr hdr; + __le32 reset_type; +} __attribute__((packed)); + + +struct i2400m_tlv_config_idle_parameters { + struct i2400m_tlv_hdr hdr; + __le32 idle_timeout; /* 100 to 300000 ms [5min], 100 increments + * 0 disabled */ + __le32 idle_paging_interval; /* frames */ +} __attribute__((packed)); + + +enum i2400m_media_status { + I2400M_MEDIA_STATUS_LINK_UP = 1, + I2400M_MEDIA_STATUS_LINK_DOWN, + I2400M_MEDIA_STATUS_LINK_RENEW, +}; + 
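[Editor's note] A sketch of walking the TLVs that follow an L3L4 header, per the HEADER + [TLV + PAYLOAD]... layout described earlier, using struct i2400m_l3l4_hdr, struct i2400m_tlv_hdr and the TLV type constants above. It assumes buf_size is the already-validated total message length; real parsing must also check that each TLV's declared length fits in the buffer, which is skipped here for brevity. example_for_each_tlv is an invented name.

        static void example_for_each_tlv(const struct i2400m_l3l4_hdr *hdr,
                                         size_t buf_size)
        {
                const struct i2400m_tlv_hdr *tlv;
                const void *itr = hdr->pl;
                const void *end = (const void *)hdr + buf_size;

                while (itr + sizeof(*tlv) <= end) {
                        tlv = itr;
                        if (le16_to_cpu(tlv->type) == I2400M_TLV_SYSTEM_STATE) {
                                const struct i2400m_tlv_system_state *ss = itr;
                                printk(KERN_INFO "system state %u\n",
                                       le32_to_cpu(ss->state));
                        }
                        itr += sizeof(*tlv) + le16_to_cpu(tlv->length);
                }
        }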
+struct i2400m_tlv_media_status { + struct i2400m_tlv_hdr hdr; + __le32 media_status; +} __attribute__((packed)); + +#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */ diff --git a/include/linux/wlp.h b/include/linux/wlp.h index 033545e145c..ac95ce6606a 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h @@ -646,6 +646,7 @@ struct wlp_wss { struct wlp { struct mutex mutex; struct uwb_rc *rc; /* UWB radio controller */ + struct net_device *ndev; struct uwb_pal pal; struct wlp_eda eda; struct wlp_uuid uuid; @@ -675,7 +676,7 @@ struct wlp_wss_attribute { static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ _show, _store) -extern int wlp_setup(struct wlp *, struct uwb_rc *); +extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); extern void wlp_remove(struct wlp *); extern ssize_t wlp_neighborhood_show(struct wlp *, char *); extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index e585657e983..7300ecdc480 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -30,7 +30,6 @@ static inline int task_is_pdflush(struct task_struct *task) enum writeback_sync_modes { WB_SYNC_NONE, /* Don't wait on anything */ WB_SYNC_ALL, /* Wait on every mapping */ - WB_SYNC_HOLD, /* Hold the inode on sb_dirty for sys_sync() */ }; /* @@ -107,7 +106,9 @@ void throttle_vm_writeout(gfp_t gfp_mask); /* These are exported to sysctl. */ extern int dirty_background_ratio; +extern unsigned long dirty_background_bytes; extern int vm_dirty_ratio; +extern unsigned long vm_dirty_bytes; extern int dirty_writeback_interval; extern int dirty_expire_interval; extern int vm_highmem_is_dirtyable; @@ -116,17 +117,26 @@ extern int laptop_mode; extern unsigned long determine_dirtyable_memory(void); +extern int dirty_background_ratio_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos); +extern int dirty_background_bytes_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos); extern int dirty_ratio_handler(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); +extern int dirty_bytes_handler(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, size_t *lenp, + loff_t *ppos); struct ctl_table; struct file; int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); -void get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty, - struct backing_dev_info *bdi); +void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, + unsigned long *pbdi_dirty, struct backing_dev_info *bdi); void page_writeback_init(void); void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 4bc1e6b86cb..52f3abd453a 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -199,6 +199,9 @@ enum { #define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO XFRM_MSG_GETSPDINFO, #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO + + XFRM_MSG_MAPPING, +#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING __XFRM_MSG_MAX }; #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) @@ -438,6 +441,15 @@ struct xfrm_user_migrate { __u16 new_family; }; +struct xfrm_user_mapping { + struct xfrm_usersa_id id; + __u32 reqid; + xfrm_address_t old_saddr; + xfrm_address_t new_saddr; + __be16 old_sport; + __be16 new_sport; 
+}; + + #ifndef __KERNEL__ /* backwards compatibility for userspace */ #define XFRMGRP_ACQUIRE 1 @@ -464,6 +476,8 @@ enum xfrm_nlgroups { #define XFRMNLGRP_REPORT XFRMNLGRP_REPORT XFRMNLGRP_MIGRATE, #define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE + XFRMNLGRP_MAPPING, +#define XFRMNLGRP_MAPPING XFRMNLGRP_MAPPING __XFRMNLGRP_MAX }; #define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)
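[Editor's note] The xfrm hunks above add a new netlink message (XFRM_MSG_MAPPING), its payload (struct xfrm_user_mapping) and a multicast group (XFRMNLGRP_MAPPING). Below is a hedged userspace sketch of listening for those notifications, which the field names suggest report an SA's peer address/port changing (as seen with UDP-encapsulated IPsec behind NAT). It assumes a <linux/xfrm.h> with this patch applied, uses the legacy 32-bit group bitmask at bind time (the group number is small enough for that), and omits all error handling.

        #include <stdio.h>
        #include <arpa/inet.h>
        #include <sys/socket.h>
        #include <linux/netlink.h>
        #include <linux/xfrm.h>

        int main(void)
        {
                char buf[8192];
                struct sockaddr_nl sa = {
                        .nl_family = AF_NETLINK,
                        .nl_groups = 1 << (XFRMNLGRP_MAPPING - 1),
                };
                int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);

                bind(fd, (struct sockaddr *)&sa, sizeof(sa));
                for (;;) {
                        int len = recv(fd, buf, sizeof(buf), 0);
                        struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

                        for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
                                struct xfrm_user_mapping *um;

                                if (nlh->nlmsg_type != XFRM_MSG_MAPPING)
                                        continue;
                                um = NLMSG_DATA(nlh);
                                printf("SA reqid %u: peer moved to port %u\n",
                                       um->reqid, ntohs(um->new_sport));
                        }
                }
                return 0;
        }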