author    Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-09-17 18:17:19 +0200
committer Stefan Richter <stefanr@s5r6.in-berlin.de>	2006-09-17 18:19:31 +0200
commit    9b4f2e9576658c4e52d95dc8d309f51b2e2db096 (patch)
tree      7b1902b0f931783fccc6fee45c6f9c16b4fde5ce /include/linux
parent    3c6c65f5ed5a6d307bd607aecd06d658c0934d88 (diff)
parent    803db244b9f71102e366fd689000c1417b9a7508 (diff)
ieee1394: merge from Linus
Conflicts:

	drivers/ieee1394/hosts.c

Patch "lockdep: annotate ieee1394 skb-queue-head locking" was meddling with
patch "ieee1394: fix kerneldoc of hpsb_alloc_host".

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/Kbuild | 63
-rw-r--r--  include/linux/atmdev.h | 2
-rw-r--r--  include/linux/audit.h | 54
-rw-r--r--  include/linux/blktrace_api.h | 5
-rw-r--r--  include/linux/bootmem.h | 2
-rw-r--r--  include/linux/byteorder/Kbuild | 2
-rw-r--r--  include/linux/cn_proc.h | 3
-rw-r--r--  include/linux/compat_ioctl.h | 1
-rw-r--r--  include/linux/completion.h | 15
-rw-r--r--  include/linux/console_struct.h | 1
-rw-r--r--  include/linux/cpu.h | 6
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/dcache.h | 12
-rw-r--r--  include/linux/debug_locks.h | 71
-rw-r--r--  include/linux/delayacct.h | 121
-rw-r--r--  include/linux/dmaengine.h | 43
-rw-r--r--  include/linux/dvb/Kbuild | 2
-rw-r--r--  include/linux/elfcore.h | 10
-rw-r--r--  include/linux/ext3_fs.h | 9
-rw-r--r--  include/linux/fb.h | 5
-rw-r--r--  include/linux/fs.h | 44
-rw-r--r--  include/linux/fs_enet_pd.h | 50
-rw-r--r--  include/linux/fsnotify.h | 6
-rw-r--r--  include/linux/futex.h | 3
-rw-r--r--  include/linux/hardirq.h | 27
-rw-r--r--  include/linux/hdlc.h | 2
-rw-r--r--  include/linux/hdlc/Kbuild | 1
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/i2c-id.h | 1
-rw-r--r--  include/linux/i2c.h | 2
-rw-r--r--  include/linux/ide.h | 3
-rw-r--r--  include/linux/idr.h | 2
-rw-r--r--  include/linux/if_vlan.h | 10
-rw-r--r--  include/linux/init_task.h | 15
-rw-r--r--  include/linux/input.h | 24
-rw-r--r--  include/linux/interrupt.h | 77
-rw-r--r--  include/linux/ioport.h | 3
-rw-r--r--  include/linux/ioprio.h | 23
-rw-r--r--  include/linux/irq.h | 5
-rw-r--r--  include/linux/irqflags.h | 96
-rw-r--r--  include/linux/isdn/Kbuild | 1
-rw-r--r--  include/linux/jbd.h | 3
-rw-r--r--  include/linux/jiffies.h | 4
-rw-r--r--  include/linux/kallsyms.h | 23
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kobject.h | 2
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/ktime.h | 7
-rw-r--r--  include/linux/libata.h | 87
-rw-r--r--  include/linux/list.h | 11
-rw-r--r--  include/linux/lockd/lockd.h | 1
-rw-r--r--  include/linux/lockdep.h | 353
-rw-r--r--  include/linux/mc146818rtc.h | 7
-rw-r--r--  include/linux/mm.h | 9
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmc/mmc.h | 2
-rw-r--r--  include/linux/mmzone.h | 7
-rw-r--r--  include/linux/module.h | 16
-rw-r--r--  include/linux/mtd/bbm.h | 35
-rw-r--r--  include/linux/mtd/mtd.h | 4
-rw-r--r--  include/linux/mtd/nand.h | 16
-rw-r--r--  include/linux/mtd/onenand.h | 77
-rw-r--r--  include/linux/mutex-debug.h | 18
-rw-r--r--  include/linux/mutex.h | 37
-rw-r--r--  include/linux/namei.h | 2
-rw-r--r--  include/linux/netdevice.h | 41
-rw-r--r--  include/linux/netfilter/Kbuild | 11
-rw-r--r--  include/linux/netfilter_arp/Kbuild | 2
-rw-r--r--  include/linux/netfilter_bridge.h | 17
-rw-r--r--  include/linux/netfilter_bridge/Kbuild | 4
-rw-r--r--  include/linux/netfilter_ipv4/Kbuild | 21
-rw-r--r--  include/linux/netfilter_ipv6/Kbuild | 6
-rw-r--r--  include/linux/nfs4.h | 6
-rw-r--r--  include/linux/nfs_fs.h | 64
-rw-r--r--  include/linux/nfs_xdr.h | 4
-rw-r--r--  include/linux/nfsd/Kbuild | 2
-rw-r--r--  include/linux/nfsd/stats.h | 6
-rw-r--r--  include/linux/node.h | 10
-rw-r--r--  include/linux/notifier.h | 2
-rw-r--r--  include/linux/nsc_gpio.h | 2
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/linux/pci_ids.h | 14
-rw-r--r--  include/linux/pci_regs.h | 16
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/pm_legacy.h | 7
-rw-r--r--  include/linux/pmu.h | 3
-rw-r--r--  include/linux/poison.h | 5
-rw-r--r--  include/linux/raid/Kbuild | 1
-rw-r--r--  include/linux/raid/md_k.h | 3
-rw-r--r--  include/linux/root_dev.h | 2
-rw-r--r--  include/linux/rtmutex.h | 10
-rw-r--r--  include/linux/rwsem-spinlock.h | 27
-rw-r--r--  include/linux/rwsem.h | 96
-rw-r--r--  include/linux/sched.h | 149
-rw-r--r--  include/linux/security.h | 40
-rw-r--r--  include/linux/seqlock.h | 12
-rw-r--r--  include/linux/serial_core.h | 4
-rw-r--r--  include/linux/skbuff.h | 62
-rw-r--r--  include/linux/spinlock.h | 63
-rw-r--r--  include/linux/spinlock_api_smp.h | 2
-rw-r--r--  include/linux/spinlock_api_up.h | 1
-rw-r--r--  include/linux/spinlock_types.h | 47
-rw-r--r--  include/linux/spinlock_types_up.h | 9
-rw-r--r--  include/linux/spinlock_up.h | 1
-rw-r--r--  include/linux/stacktrace.h | 20
-rw-r--r--  include/linux/sunrpc/Kbuild | 1
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h | 4
-rw-r--r--  include/linux/sunrpc/xprt.h | 4
-rw-r--r--  include/linux/swap.h | 1
-rw-r--r--  include/linux/sysctl.h | 2
-rw-r--r--  include/linux/taskstats.h | 137
-rw-r--r--  include/linux/taskstats_kern.h | 89
-rw-r--r--  include/linux/tc_act/Kbuild | 1
-rw-r--r--  include/linux/tc_ematch/Kbuild | 1
-rw-r--r--  include/linux/time.h | 12
-rw-r--r--  include/linux/timex.h | 3
-rw-r--r--  include/linux/tty.h | 13
-rw-r--r--  include/linux/usb.h | 9
-rw-r--r--  include/linux/usb/serial.h | 300
-rw-r--r--  include/linux/usb_ch9.h | 7
-rw-r--r--  include/linux/usb_gadget.h | 4
-rw-r--r--  include/linux/usb_usual.h | 4
-rw-r--r--  include/linux/vermagic.h | 2
-rw-r--r--  include/linux/videodev.h | 6
-rw-r--r--  include/linux/videodev2.h | 4
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/vmstat.h | 17
-rw-r--r--  include/linux/vt.h | 11
-rw-r--r--  include/linux/vt_kern.h | 1
-rw-r--r--  include/linux/wait.h | 8
-rw-r--r--  include/linux/workqueue.h | 2
131 files changed, 2480 insertions, 511 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644
index 00000000000..2b8a7d68fae
--- /dev/null
+++ b/include/linux/Kbuild
@@ -0,0 +1,63 @@
+header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \
+ netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \
+ netfilter_ipv6/
+
+header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \
+ atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \
+ atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \
+ atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \
+ awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \
+ bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \
+ consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \
+ dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \
+ fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \
+ fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \
+ hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \
+ if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \
+ if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \
+ if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \
+ in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \
+ ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \
+ jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \
+ mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \
+ nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \
+ pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \
+ pkt_sched.h posix_types.h ppdev.h prctl.h ps2esdi.h qic117.h \
+ qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \
+ resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \
+ sound.h stddef.h synclink.h telephony.h termios.h ticable.h \
+ times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \
+ utsname.h video_decoder.h video_encoder.h videotext.h vt.h \
+ wavefront.h wireless.h xattr.h x25.h zorro_ids.h
+
+unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \
+ atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \
+ capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \
+ cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \
+ dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \
+ ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \
+ filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \
+ genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \
+ i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \
+ if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \
+ if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \
+ inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \
+ ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \
+ joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \
+ llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \
+ msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \
+ netfilter_bridge.h netfilter_decnet.h netfilter.h \
+ netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \
+ netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \
+ n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \
+ pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \
+ random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \
+ route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \
+ selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \
+ signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \
+ soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \
+ udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \
+ usbdevice_fs.h user.h videodev2.h videodev.h wait.h \
+ wanrouter.h watchdog.h xfrm.h zftape.h
+
+objhdr-y := version.h
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 41788a31c43..2096e5c7282 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -7,7 +7,6 @@
#define LINUX_ATMDEV_H
-#include <linux/device.h>
#include <linux/atmapi.h>
#include <linux/atm.h>
#include <linux/atmioc.h>
@@ -210,6 +209,7 @@ struct atm_cirange {
#ifdef __KERNEL__
+#include <linux/device.h>
#include <linux/wait.h> /* wait_queue_head_t */
#include <linux/time.h> /* struct timeval */
#include <linux/net.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b27d7debc5a..40a6c26294a 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -132,6 +132,10 @@
#define AUDIT_CLASS_DIR_WRITE_32 1
#define AUDIT_CLASS_CHATTR 2
#define AUDIT_CLASS_CHATTR_32 3
+#define AUDIT_CLASS_READ 4
+#define AUDIT_CLASS_READ_32 5
+#define AUDIT_CLASS_WRITE 6
+#define AUDIT_CLASS_WRITE_32 7
/* This bitmask is used to validate user input. It represents all bits that
* are currently used in an audit field constant understood by the kernel.
@@ -177,6 +181,7 @@
#define AUDIT_EXIT 103
#define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */
#define AUDIT_WATCH 105
+#define AUDIT_PERM 106
#define AUDIT_ARG0 200
#define AUDIT_ARG1 (AUDIT_ARG0+1)
@@ -252,6 +257,11 @@
#define AUDIT_ARCH_V850 (EM_V850|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_PERM_EXEC 1
+#define AUDIT_PERM_WRITE 2
+#define AUDIT_PERM_READ 4
+#define AUDIT_PERM_ATTR 8
+
struct audit_status {
__u32 mask; /* Bit mask for valid entries */
__u32 enabled; /* 1 = enabled, 0 = disabled */
@@ -314,6 +324,7 @@ struct mqstat;
#define AUDITSC_FAILURE 2
#define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS )
extern int __init audit_register_class(int class, unsigned *list);
+extern int audit_classify_syscall(int abi, unsigned syscall);
#ifdef CONFIG_AUDITSYSCALL
/* These are defined in auditsc.c */
/* Public API */
@@ -327,21 +338,31 @@ extern void __audit_getname(const char *name);
extern void audit_putname(const char *name);
extern void __audit_inode(const char *name, const struct inode *inode);
extern void __audit_inode_child(const char *dname, const struct inode *inode,
- unsigned long pino);
+ const struct inode *parent);
+extern void __audit_inode_update(const struct inode *inode);
+static inline int audit_dummy_context(void)
+{
+ void *p = current->audit_context;
+ return !p || *(int *)p;
+}
static inline void audit_getname(const char *name)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
__audit_getname(name);
}
static inline void audit_inode(const char *name, const struct inode *inode) {
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
__audit_inode(name, inode);
}
static inline void audit_inode_child(const char *dname,
- const struct inode *inode,
- unsigned long pino) {
- if (unlikely(current->audit_context))
- __audit_inode_child(dname, inode, pino);
+ const struct inode *inode,
+ const struct inode *parent) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode_child(dname, inode, parent);
+}
+static inline void audit_inode_update(const struct inode *inode) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode_update(inode);
}
/* Private API (for audit.c only) */
@@ -365,57 +386,61 @@ extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_ipc_obj(ipcp);
return 0;
}
static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_ipc_set_perm(qbytes, uid, gid, mode);
return 0;
}
static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_open(oflag, mode, u_attr);
return 0;
}
static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
return 0;
}
static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
return 0;
}
static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_notify(mqdes, u_notification);
return 0;
}
static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_getsetattr(mqdes, mqstat);
return 0;
}
+extern int audit_n_rules;
#else
#define audit_alloc(t) ({ 0; })
#define audit_free(t) do { ; } while (0)
#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
#define audit_syscall_exit(f,r) do { ; } while (0)
+#define audit_dummy_context() 1
#define audit_getname(n) do { ; } while (0)
#define audit_putname(n) do { ; } while (0)
#define __audit_inode(n,i) do { ; } while (0)
#define __audit_inode_child(d,i,p) do { ; } while (0)
+#define __audit_inode_update(i) do { ; } while (0)
#define audit_inode(n,i) do { ; } while (0)
#define audit_inode_child(d,i,p) do { ; } while (0)
+#define audit_inode_update(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
#define audit_get_loginuid(c) ({ -1; })
#define audit_ipc_obj(i) ({ 0; })
@@ -430,6 +455,7 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
#define audit_mq_timedreceive(d,l,p,t) ({ 0; })
#define audit_mq_notify(d,n) ({ 0; })
#define audit_mq_getsetattr(d,s) ({ 0; })
+#define audit_n_rules 0
#endif
#ifdef CONFIG_AUDIT
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index a7e8cef73d1..7520cc1ff9e 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -11,7 +11,7 @@ enum blktrace_cat {
BLK_TC_READ = 1 << 0, /* reads */
BLK_TC_WRITE = 1 << 1, /* writes */
BLK_TC_BARRIER = 1 << 2, /* barrier */
- BLK_TC_SYNC = 1 << 3, /* barrier */
+ BLK_TC_SYNC = 1 << 3, /* sync IO */
BLK_TC_QUEUE = 1 << 4, /* queueing/merging */
BLK_TC_REQUEUE = 1 << 5, /* requeueing */
BLK_TC_ISSUE = 1 << 6, /* issue */
@@ -19,6 +19,7 @@ enum blktrace_cat {
BLK_TC_FS = 1 << 8, /* fs requests */
BLK_TC_PC = 1 << 9, /* pc requests */
BLK_TC_NOTIFY = 1 << 10, /* special message */
+ BLK_TC_AHEAD = 1 << 11, /* readahead */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@@ -147,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
u32 what)
{
struct blk_trace *bt = q->blk_trace;
- int rw = rq->flags & 0x07;
+ int rw = rq->flags & 0x03;
if (likely(!bt))
return;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 22866fa2d96..1021f508d82 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -91,7 +91,7 @@ static inline void *alloc_remap(int nid, unsigned long size)
}
#endif
-extern unsigned long nr_kernel_pages;
+extern unsigned long __meminitdata nr_kernel_pages;
extern unsigned long nr_all_pages;
extern void *__init alloc_large_system_hash(const char *tablename,
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
new file mode 100644
index 00000000000..84a57d4fb21
--- /dev/null
+++ b/include/linux/byteorder/Kbuild
@@ -0,0 +1,2 @@
+unifdef-y += generic.h swabb.h swab.h
+header-y += big_endian.h little_endian.h pdp_endian.h
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index dbb7769009b..1c86d65bc4b 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -57,7 +57,8 @@ struct proc_event {
PROC_EVENT_EXIT = 0x80000000
} what;
__u32 cpu;
- struct timespec timestamp;
+ __u64 __attribute__((aligned(8))) timestamp_ns;
+ /* Number of nano seconds since system boot */
union { /* must be last field of proc_event struct */
struct {
__u32 err;
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 269d000bb2a..bea0255196c 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE)
COMPATIBLE_IOCTL(VT_RESIZEX)
COMPATIBLE_IOCTL(VT_LOCKSWITCH)
COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
+COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 90663ad217f..268c5a4a2bd 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -18,9 +18,24 @@ struct completion {
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define COMPLETION_INITIALIZER_ONSTACK(work) \
+ ({ init_completion(&work); work; })
+
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
static inline void init_completion(struct completion *x)
{
x->done = 0;
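For reference, a minimal sketch of how the new on-stack variant is used (illustrative only, not from the patch): declare the completion with DECLARE_COMPLETION_ONSTACK() inside a function so lockdep gets a per-instance key, then wait as usual. The example_queue_work() helper is hypothetical.

#include <linux/completion.h>

/* Illustrative sketch: an on-stack completion gets its lockdep-aware
 * initializer via DECLARE_COMPLETION_ONSTACK() instead of the static
 * DECLARE_COMPLETION(). */
static int example_wait_for_work(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	example_queue_work(&done);	/* hypothetical: calls complete(&done) later */
	wait_for_completion(&done);	/* sleep until the work completes */
	return 0;
}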
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index f8e5587a0f9..25423f79bf9 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -9,6 +9,7 @@
* to achieve effects such as fast scrolling by changing the origin.
*/
+#include <linux/wait.h>
#include <linux/vt.h>
struct vt_struct;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 44a11f1ccaf..8fb344a9abd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,7 +48,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif
-extern int current_in_cpu_hotplug(void);
int cpu_up(unsigned int cpu);
@@ -61,10 +60,6 @@ static inline int register_cpu_notifier(struct notifier_block *nb)
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
-static inline int current_in_cpu_hotplug(void)
-{
- return 0;
-}
#endif /* CONFIG_SMP */
extern struct sysdev_class cpu_sysdev_class;
@@ -73,7 +68,6 @@ extern struct sysdev_class cpu_sysdev_class;
/* Stop CPUs going up and down. */
extern void lock_cpu_hotplug(void);
extern void unlock_cpu_hotplug(void);
-extern int lock_cpu_hotplug_interruptible(void);
#define hotcpu_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
{ .notifier_call = fn, .priority = pri }; \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 35e137636b0..4ea39fee99c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -172,9 +172,6 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
-/* pass an event to the cpufreq governor */
-int cpufreq_governor(unsigned int cpu, unsigned int event);
-
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 0dd1610a94a..471781ffeab 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -114,6 +114,18 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
};
+/*
+ * dentry->d_lock spinlock nesting subclasses:
+ *
+ * 0: normal
+ * 1: nested
+ */
+enum dentry_d_lock_class
+{
+ DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+ DENTRY_D_LOCK_NESTED
+};
+
struct dentry_operations {
int (*d_revalidate)(struct dentry *, struct nameidata *);
int (*d_hash) (struct dentry *, struct qstr *);
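A minimal sketch of what the new d_lock subclasses are for (illustrative only): when two dentry spinlocks must be held at once, the second acquisition is annotated with spin_lock_nested() so the lock validator does not report a false self-deadlock.

#include <linux/dcache.h>
#include <linux/spinlock.h>

/* Illustrative sketch: nested acquisition of two dentry->d_lock locks. */
static void example_lock_dentry_pair(struct dentry *parent, struct dentry *child)
{
	spin_lock(&parent->d_lock);		/* DENTRY_D_LOCK_NORMAL */
	spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
	/* ... work on both dentries ... */
	spin_unlock(&child->d_lock);
	spin_unlock(&parent->d_lock);
}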
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
new file mode 100644
index 00000000000..88dafa246d8
--- /dev/null
+++ b/include/linux/debug_locks.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_DEBUG_LOCKING_H
+#define __LINUX_DEBUG_LOCKING_H
+
+struct task_struct;
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+extern int debug_locks_off(void);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we dont want the function argument overhead
+ * in the nondebug case - hence these macros:
+ */
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#define DEBUG_LOCKS_WARN_ON(c) \
+({ \
+ int __ret = 0; \
+ \
+ if (unlikely(c)) { \
+ if (debug_locks_off()) \
+ WARN_ON(1); \
+ __ret = 1; \
+ } \
+ __ret; \
+})
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
+#else
+# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+ extern void locking_selftest(void);
+#else
+# define locking_selftest() do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline void debug_show_all_locks(void)
+{
+}
+
+static inline void debug_show_held_locks(struct task_struct *task)
+{
+}
+
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+}
+
+static inline void
+debug_check_no_locks_held(struct task_struct *task)
+{
+}
+#endif
+
+#endif
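A short, illustrative use of the new DEBUG_LOCKS_WARN_ON() helper (sketch, not part of the diff): it warns once, turns further lock debugging off, and returns non-zero when the condition fired, so callers can stop doing checks that are no longer reliable.

#include <linux/debug_locks.h>
#include <linux/irqflags.h>

/* Illustrative sketch: assert an invariant inside lock-debugging code. */
static void example_assert_irqs_disabled(void)
{
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;		/* debugging switched itself off; bail out */
	/* ... further debug-only consistency checks ... */
}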
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
new file mode 100644
index 00000000000..561e2a77805
--- /dev/null
+++ b/include/linux/delayacct.h
@@ -0,0 +1,121 @@
+/* delayacct.h - per-task delay accounting
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_DELAYACCT_H
+#define _LINUX_DELAYACCT_H
+
+#include <linux/sched.h>
+#include <linux/taskstats_kern.h>
+
+/*
+ * Per-task flags relevant to delay accounting
+ * maintained privately to avoid exhausting similar flags in sched.h:PF_*
+ * Used to set current->delays->flags
+ */
+#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+
+extern int delayacct_on; /* Delay accounting turned on/off */
+extern kmem_cache_t *delayacct_cache;
+extern void delayacct_init(void);
+extern void __delayacct_tsk_init(struct task_struct *);
+extern void __delayacct_tsk_exit(struct task_struct *);
+extern void __delayacct_blkio_start(void);
+extern void __delayacct_blkio_end(void);
+extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern __u64 __delayacct_blkio_ticks(struct task_struct *);
+
+static inline void delayacct_set_flag(int flag)
+{
+ if (current->delays)
+ current->delays->flags |= flag;
+}
+
+static inline void delayacct_clear_flag(int flag)
+{
+ if (current->delays)
+ current->delays->flags &= ~flag;
+}
+
+static inline void delayacct_tsk_init(struct task_struct *tsk)
+{
+ /* reinitialize in case parent's non-null pointer was dup'ed*/
+ tsk->delays = NULL;
+ if (delayacct_on)
+ __delayacct_tsk_init(tsk);
+}
+
+/* Free tsk->delays. Called from bad fork and __put_task_struct
+ * where there's no risk of tsk->delays being accessed elsewhere
+ */
+static inline void delayacct_tsk_free(struct task_struct *tsk)
+{
+ if (tsk->delays)
+ kmem_cache_free(delayacct_cache, tsk->delays);
+ tsk->delays = NULL;
+}
+
+static inline void delayacct_blkio_start(void)
+{
+ if (current->delays)
+ __delayacct_blkio_start();
+}
+
+static inline void delayacct_blkio_end(void)
+{
+ if (current->delays)
+ __delayacct_blkio_end();
+}
+
+static inline int delayacct_add_tsk(struct taskstats *d,
+ struct task_struct *tsk)
+{
+ if (!delayacct_on || !tsk->delays)
+ return 0;
+ return __delayacct_add_tsk(d, tsk);
+}
+
+static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
+{
+ if (tsk->delays)
+ return __delayacct_blkio_ticks(tsk);
+ return 0;
+}
+
+#else
+static inline void delayacct_set_flag(int flag)
+{}
+static inline void delayacct_clear_flag(int flag)
+{}
+static inline void delayacct_init(void)
+{}
+static inline void delayacct_tsk_init(struct task_struct *tsk)
+{}
+static inline void delayacct_tsk_free(struct task_struct *tsk)
+{}
+static inline void delayacct_blkio_start(void)
+{}
+static inline void delayacct_blkio_end(void)
+{}
+static inline int delayacct_add_tsk(struct taskstats *d,
+ struct task_struct *tsk)
+{ return 0; }
+static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
+{ return 0; }
+#endif /* CONFIG_TASK_DELAY_ACCT */
+
+#endif
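Sketch of the call pattern the block-I/O delay hooks expect (illustrative only): bracket a blocking wait so the time the task spends blocked is charged to it. Here io_schedule() stands in for any blocking I/O wait.

#include <linux/delayacct.h>
#include <linux/sched.h>

/* Illustrative sketch: the wrappers are no-ops when delay accounting is
 * off or the task has no delays structure allocated. */
static void example_wait_on_io(void)
{
	delayacct_blkio_start();
	io_schedule();			/* any blocking wait for I/O */
	delayacct_blkio_end();
}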
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 272010a6078..c94d8f1d62e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -44,7 +44,7 @@ enum dma_event {
};
/**
- * typedef dma_cookie_t
+ * typedef dma_cookie_t - an opaque DMA cookie
*
* if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
*/
@@ -80,14 +80,14 @@ struct dma_chan_percpu {
/**
* struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be NULL when unused
- * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @client: ptr to the client user of this chan, will be %NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
* @cookie: last cookie value returned to client
- * @chan_id:
- * @class_dev:
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
* @refcount: kref, used in "bigref" slow-mode
- * @slow_ref:
- * @rcu:
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
* @client_node: used to add this to the client chan list
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
* @chancnt: how many DMA channels are supported
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
- * @refcount:
- * @done:
- * @dev_id:
- * Other func ptrs: used to make use of this device's capabilities
+ * @refcount: reference count
+ * @done: IO completion struct
+ * @dev_id: unique device ID
+ * @device_alloc_chan_resources: allocate resources and return the
+ * number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
+ * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
+ * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
+ * @device_memcpy_complete: poll the status of an IOAT DMA transaction
+ * @device_memcpy_issue_pending: push appended descriptors to hardware
*/
struct dma_device {
@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages)
+ * user space pages).
*/
static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
}
/**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
}
/**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
* @chan: DMA channel to offload copy to
- * @dest_page: destination page
+ * @dest_pg: destination page
* @dest_off: offset in page to copy to
- * @src_page: source page
+ * @src_pg: source page
* @src_off: offset in page to copy from
* @len: length
*
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
* address according to the DMA mapping API rules for streaming mappings.
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages)
+ * (kernel memory or locked user space pages).
*/
static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
/**
* dma_async_memcpy_issue_pending - flush pending copies to HW
- * @chan:
+ * @chan: target DMA channel
*
* This allows drivers to push copies to HW in batches,
* reducing MMIO writes where possible.
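A hedged sketch of the usage the kerneldoc above describes (not taken from the patch): queue an offloaded copy on a channel, then flush pending descriptors to hardware. Completion polling and the CPU fallback path are omitted.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative sketch: a negative cookie means the copy was not queued. */
static int example_offload_copy(struct dma_chan *chan,
				void *dst, void *src, size_t len)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0)
		return -ENOMEM;			/* caller may fall back to memcpy() */

	dma_async_memcpy_issue_pending(chan);	/* push descriptors to HW */
	return 0;
}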
diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild
new file mode 100644
index 00000000000..63973af72fd
--- /dev/null
+++ b/include/linux/dvb/Kbuild
@@ -0,0 +1,2 @@
+header-y += ca.h frontend.h net.h osd.h version.h
+unifdef-y := audio.h dmx.h video.h
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 0cf0bea010f..9631dddae34 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -60,6 +60,16 @@ struct elf_prstatus
long pr_instr; /* Current instruction */
#endif
elf_gregset_t pr_reg; /* GP registers */
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ /* When using FDPIC, the loadmap addresses need to be communicated
+ * to GDB in order for GDB to do the necessary relocations. The
+ * fields (below) used to communicate this information are placed
+ * immediately after ``pr_reg'', so that the loadmap addresses may
+ * be viewed as part of the register set if so desired.
+ */
+ unsigned long pr_exec_fdpic_loadmap;
+ unsigned long pr_interp_fdpic_loadmap;
+#endif
int pr_fpvalid; /* True if math co-processor being used. */
};
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 5607e6457a6..9f9cce7bd86 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -492,6 +492,15 @@ static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
{
return container_of(inode, struct ext3_inode_info, vfs_inode);
}
+
+static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
+{
+ return ino == EXT3_ROOT_INO ||
+ ino == EXT3_JOURNAL_INO ||
+ ino == EXT3_RESIZE_INO ||
+ (ino >= EXT3_FIRST_INO(sb) &&
+ ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
+}
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffefeeeeca9..2f335e96601 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1,7 +1,6 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
-#include <linux/backlight.h>
#include <asm/types.h>
/* Definitions of frame buffers */
@@ -377,11 +376,11 @@ struct fb_cursor {
#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/tty.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/list.h>
+#include <linux/backlight.h>
#include <asm/io.h>
struct vm_area_struct;
@@ -525,7 +524,7 @@ struct fb_event {
extern int fb_register_client(struct notifier_block *nb);
extern int fb_unregister_client(struct notifier_block *nb);
-
+extern int fb_notifier_call_chain(unsigned long val, void *v);
/*
* Pixmap structure definition
*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e04a5cfe874..555bc195c42 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -27,6 +27,10 @@
#define BLOCK_SIZE_BITS 10
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+#define SEEK_SET 0 /* seek relative to beginning of file */
+#define SEEK_CUR 1 /* seek relative to current file position */
+#define SEEK_END 2 /* seek relative to end of file */
+
/* And dynamically-tunable limits and defaults: */
struct files_stat_struct {
int nr_files; /* read only */
@@ -436,6 +440,21 @@ struct block_device {
};
/*
+ * bdev->bd_mutex nesting subclasses for the lock validator:
+ *
+ * 0: normal
+ * 1: 'whole'
+ * 2: 'partition'
+ */
+enum bdev_bd_mutex_lock_class
+{
+ BD_MUTEX_NORMAL,
+ BD_MUTEX_WHOLE,
+ BD_MUTEX_PARTITION
+};
+
+
+/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
* radix trees
*/
@@ -543,6 +562,26 @@ struct inode {
};
/*
+ * inode->i_mutex nesting subclasses for the lock validator:
+ *
+ * 0: the object of the current VFS operation
+ * 1: parent
+ * 2: child/target
+ * 3: quota file
+ *
+ * The locking order between these classes is
+ * parent -> child -> normal -> xattr -> quota
+ */
+enum inode_i_mutex_lock_class
+{
+ I_MUTEX_NORMAL,
+ I_MUTEX_PARENT,
+ I_MUTEX_CHILD,
+ I_MUTEX_XATTR,
+ I_MUTEX_QUOTA
+};
+
+/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
* with respect to the local cpu (unlike with preempt disabled),
@@ -682,6 +721,7 @@ extern spinlock_t files_lock;
#define FL_POSIX 1
#define FL_FLOCK 2
#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
#define FL_LEASE 32 /* lease held on this file */
#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
@@ -1276,6 +1316,8 @@ struct file_system_type {
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
+ struct lock_class_key s_lock_key;
+ struct lock_class_key s_umount_key;
};
extern int get_sb_bdev(struct file_system_type *fs_type,
@@ -1404,6 +1446,7 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct block_device *open_partition_by_devnum(dev_t, unsigned);
extern const struct file_operations def_blk_fops;
extern const struct address_space_operations def_blk_aops;
extern const struct file_operations def_chr_fops;
@@ -1414,6 +1457,7 @@ extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, mode_t, unsigned);
extern int blkdev_put(struct block_device *);
+extern int blkdev_put_partition(struct block_device *);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
#ifdef CONFIG_SYSFS
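A minimal sketch of the locking order the new i_mutex subclasses annotate, using mutex_lock_nested() (illustrative only; a real directory operation takes more locks than shown here).

#include <linux/fs.h>
#include <linux/mutex.h>

/* Illustrative sketch: parent directory first, then the child inode,
 * with the subclass telling the lock validator which role each plays. */
static void example_lock_parent_child(struct inode *dir, struct inode *inode)
{
	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	/* ... operate on the directory entry ... */
	mutex_unlock(&inode->i_mutex);
	mutex_unlock(&dir->i_mutex);
}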
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index 783c476b867..74ed35a00a9 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -69,34 +69,21 @@ enum fs_ioport {
fsiop_porte,
};
-struct fs_mii_bus_info {
- int method; /* mii method */
- int id; /* the id of the mii_bus */
- int disable_aneg; /* if the controller needs to negothiate speed & duplex */
- int lpa; /* the default board-specific vallues will be applied otherwise */
-
- union {
- struct {
- int duplex;
- int speed;
- } fixed;
-
- struct {
- /* nothing */
- } fec;
-
- struct {
- /* nothing */
- } scc;
-
- struct {
- int mdio_port; /* port & bit for MDIO */
- int mdio_bit;
- int mdc_port; /* port & bit for MDC */
- int mdc_bit;
- int delay; /* delay in us */
- } bitbang;
- } i;
+struct fs_mii_bit {
+ u32 offset;
+ u8 bit;
+ u8 polarity;
+};
+struct fs_mii_bb_platform_info {
+ struct fs_mii_bit mdio_dir;
+ struct fs_mii_bit mdio_dat;
+ struct fs_mii_bit mdc_dat;
+ int mdio_port; /* port & bit for MDIO */
+ int mdio_bit;
+ int mdc_port; /* port & bit for MDC */
+ int mdc_bit;
+ int delay; /* delay in us */
+ int irq[32]; /* irqs per phy's */
};
struct fs_platform_info {
@@ -119,6 +106,7 @@ struct fs_platform_info {
u32 device_flags;
int phy_addr; /* the phy address (-1 no phy) */
+ const char* bus_id;
int phy_irq; /* the phy irq (if it exists) */
const struct fs_mii_bus_info *bus_info;
@@ -130,6 +118,10 @@ struct fs_platform_info {
int napi_weight; /* NAPI weight */
int use_rmii; /* use RMII mode */
+ int has_phy; /* if the network is phy container as well...*/
+};
+struct fs_mii_fec_platform_info {
+ u32 irq[32];
+ u32 mii_speed;
};
-
#endif
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index cc5dec70c32..d4f219ffaa5 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -67,7 +67,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
}
- audit_inode_child(new_name, source, new_dir->i_ino);
+ audit_inode_child(new_name, source, new_dir);
}
/*
@@ -98,7 +98,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
- audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
+ audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}
/*
@@ -109,7 +109,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
dentry->d_name.name, dentry->d_inode);
- audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
+ audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}
/*
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 34c3a215f2c..d097b5b72bc 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -96,7 +96,8 @@ struct robust_list_head {
long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
+extern int
+handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 114ae583cca..50d8b5744cf 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -3,6 +3,7 @@
#include <linux/preempt.h>
#include <linux/smp_lock.h>
+#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -86,9 +87,6 @@ extern void synchronize_irq(unsigned int irq);
# define synchronize_irq(irq) barrier()
#endif
-#define nmi_enter() irq_enter()
-#define nmi_exit() sub_preempt_count(HARDIRQ_OFFSET)
-
struct task_struct;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -97,12 +95,35 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
#define irq_enter() \
do { \
account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \
+ trace_hardirq_enter(); \
+ } while (0)
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ account_system_vtime(current); \
+ sub_preempt_count(HARDIRQ_OFFSET); \
} while (0)
+/*
+ * Exit irq context and process softirqs if needed:
+ */
extern void irq_exit(void);
+#define nmi_enter() do { lockdep_off(); irq_enter(); } while (0)
+#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
+
#endif /* LINUX_HARDIRQ_H */
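Sketch of where the reworked nmi_enter()/nmi_exit() pair sits (illustrative skeleton only): an architecture's NMI handler brackets its work with them, so lockdep is switched off for the duration of the non-maskable context.

#include <linux/hardirq.h>

/* Illustrative skeleton of an arch-level NMI handler. */
static void example_do_nmi(void)
{
	nmi_enter();		/* lockdep_off() + irq_enter() */
	/* ... handle the NMI source ... */
	nmi_exit();		/* __irq_exit() + lockdep_on() */
}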
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 4513f9e4093..d5ebbb29aea 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -224,8 +224,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
-/* Called by hardware driver when DCD line level changes */
-void hdlc_set_carrier(int on, struct net_device *dev);
/* May be used by hardware driver to gain control over HDLC device */
static __inline__ void hdlc_proto_detach(hdlc_device *hdlc)
diff --git a/include/linux/hdlc/Kbuild b/include/linux/hdlc/Kbuild
new file mode 100644
index 00000000000..1fb26448faa
--- /dev/null
+++ b/include/linux/hdlc/Kbuild
@@ -0,0 +1 @@
+header-y += ioctl.h
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 07d7305f131..4fc379de6c2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -80,6 +80,7 @@ struct hrtimer_sleeper {
* @get_softirq_time: function to retrieve the current time from the softirq
* @curr_timer: the timer which is executing a callback right now
* @softirq_time: the time when running the hrtimer queue in the softirq
+ * @lock_key: the lock_class_key for use with lockdep
*/
struct hrtimer_base {
clockid_t index;
@@ -91,6 +92,7 @@ struct hrtimer_base {
ktime_t (*get_softirq_time)(void);
struct hrtimer *curr_timer;
ktime_t softirq_time;
+ struct lock_class_key lock_key;
};
/*
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 21338bb3441..9418519a55d 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -115,6 +115,7 @@
#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
+#define I2C_DRIVERID_ISL1208 88 /* Intersil ISL1208 RTC */
#define I2C_DRIVERID_I2CDEV 900
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 526ddc8eecf..eb0628a7ecc 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -193,6 +193,8 @@ struct i2c_algorithm {
to NULL. If an adapter algorithm can do SMBus access, set
smbus_xfer. If set to NULL, the SMBus protocol is simulated
using common I2C messages */
+ /* master_xfer should return the number of messages successfully
+ processed, or a negative value on error */
int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs,
int num);
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
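An illustrative sketch of the return convention the new comment documents (not from the patch): a master_xfer implementation reports how many messages it completed, or a negative errno. The example_send_msg() helper is hypothetical.

#include <linux/i2c.h>
#include <linux/errno.h>

/* Illustrative sketch of an i2c_algorithm master_xfer method. */
static int example_master_xfer(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		if (example_send_msg(adap, &msgs[i]) < 0)	/* hypothetical helper */
			return -EIO;
	}
	return num;	/* number of messages successfully processed */
}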
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 285316c836b..99620451d95 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -571,6 +571,7 @@ typedef struct ide_drive_s {
u8 waiting_for_dma; /* dma currently in progress */
u8 unmask; /* okay to unmask other irqs */
u8 bswap; /* byte swap data */
+ u8 noflush; /* don't attempt flushes */
u8 dsc_overlap; /* DSC overlap */
u8 nice1; /* give potential excess bandwidth */
@@ -1359,7 +1360,7 @@ extern struct semaphore ide_cfg_sem;
* ide_drive_t->hwif: constant, no locking
*/
-#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable(); } while (0)
+#define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
extern struct bus_type ide_bus_type;
diff --git a/include/linux/idr.h b/include/linux/idr.h
index f559a719dbe..826803449db 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -66,7 +66,7 @@ struct idr {
.id_free = NULL, \
.layers = 0, \
.id_free_cnt = 0, \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
}
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index eef0876d830..ab274083274 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -23,8 +23,8 @@ struct vlan_collection;
struct vlan_dev_info;
struct hlist_node;
-#include <linux/proc_fs.h> /* for proc_dir_entry */
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header)
* that VLAN requires.
@@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
{
struct net_device_stats *stats;
+ if (skb_bond_should_drop(skb)) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
if (skb->dev == NULL) {
dev_kfree_skb_any(skb);
@@ -185,7 +190,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
* This allows the VLAN to have a different MAC than the underlying
* device, and still route correctly.
*/
- if (!memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN))
+ if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+ skb->dev->dev_addr))
skb->pkt_type = PACKET_HOST;
break;
};
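For clarity (sketch, not part of the diff): compare_ether_addr() keeps the memcmp() semantics for 6-byte MAC addresses, returning 0 when the two addresses match, so the negated test above is logically unchanged.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Illustrative sketch: 0 from compare_ether_addr() means "equal". */
static int example_is_addressed_to(const u8 *dest, const struct net_device *dev)
{
	return compare_ether_addr(dest, dev->dev_addr) == 0;
}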
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 3a256957fb5..60aac2cea0c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -3,6 +3,8 @@
#include <linux/file.h>
#include <linux/rcupdate.h>
+#include <linux/irqflags.h>
+#include <linux/lockdep.h>
#define INIT_FDTABLE \
{ \
@@ -21,7 +23,7 @@
.count = ATOMIC_INIT(1), \
.fdt = &init_files.fdtab, \
.fdtab = INIT_FDTABLE, \
- .file_lock = SPIN_LOCK_UNLOCKED, \
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \
.next_fd = 0, \
.close_on_exec_init = { { 0, } }, \
.open_fds_init = { { 0, } }, \
@@ -36,7 +38,7 @@
.user_id = 0, \
.next = NULL, \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
- .ctx_lock = SPIN_LOCK_UNLOCKED, \
+ .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
.reqs_active = 0U, \
.max_reqs = ~0U, \
}
@@ -48,7 +50,7 @@
.mm_users = ATOMIC_INIT(2), \
.mm_count = ATOMIC_INIT(1), \
.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
- .page_table_lock = SPIN_LOCK_UNLOCKED, \
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
.mmlist = LIST_HEAD_INIT(name.mmlist), \
.cpu_vm_mask = CPU_MASK_ALL, \
}
@@ -69,7 +71,7 @@
#define INIT_SIGHAND(sighand) { \
.count = ATOMIC_INIT(1), \
.action = { { { .sa_handler = NULL, } }, }, \
- .siglock = SPIN_LOCK_UNLOCKED, \
+ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
}
extern struct group_info init_groups;
@@ -119,12 +121,13 @@ extern struct group_info init_groups;
.list = LIST_HEAD_INIT(tsk.pending.list), \
.signal = {{0}}}, \
.blocked = {{0}}, \
- .alloc_lock = SPIN_LOCK_UNLOCKED, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = SPIN_LOCK_UNLOCKED, \
- INIT_RT_MUTEXES(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
}
diff --git a/include/linux/input.h b/include/linux/input.h
index 56f1e0e1e59..b3253ab72ff 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -893,7 +893,6 @@ struct input_dev {
int (*open)(struct input_dev *dev);
void (*close)(struct input_dev *dev);
- int (*accept)(struct input_dev *dev, struct file *file);
int (*flush)(struct input_dev *dev, struct file *file);
int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
int (*upload_effect)(struct input_dev *dev, struct ff_effect *effect);
@@ -961,6 +960,26 @@ struct input_dev {
struct input_handle;
+/**
+ * struct input_handler - implements one of interfaces for input devices
+ * @private: driver-specific data
+ * @event: event handler
+ * @connect: called when attaching a handler to an input device
+ * @disconnect: disconnects a handler from input device
+ * @start: starts handler for given handle. This function is called by
+ * input core right after connect() method and also when a process
+ * that "grabbed" a device releases it
+ * @fops: file operations this driver implements
+ * @minor: beginning of range of 32 minors for devices this driver
+ * can provide
+ * @name: name of the handler, to be shown in /proc/bus/input/handlers
+ * @id_table: pointer to a table of input_device_ids this driver can
+ * handle
+ * @blacklist: pointer to a table of input_device_ids this driver should

+ * ignore even if they match @id_table
+ * @h_list: list of input handles associated with the handler
+ * @node: for placing the driver onto input_handler_list
+ */
struct input_handler {
void *private;
@@ -968,6 +987,7 @@ struct input_handler {
void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
struct input_handle* (*connect)(struct input_handler *handler, struct input_dev *dev, struct input_device_id *id);
void (*disconnect)(struct input_handle *handle);
+ void (*start)(struct input_handle *handle);
const struct file_operations *fops;
int minor;
@@ -1030,10 +1050,10 @@ void input_release_device(struct input_handle *);
int input_open_device(struct input_handle *);
void input_close_device(struct input_handle *);
-int input_accept_process(struct input_handle *handle, struct file *file);
int input_flush_device(struct input_handle* handle, struct file* file);
void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
static inline void input_report_key(struct input_dev *dev, unsigned int code, int value)
{
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index da3e0dbe61d..d5afee95fd4 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,6 +10,7 @@
#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -80,12 +81,64 @@ extern int request_irq(unsigned int,
unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
+/*
+ * On lockdep we dont want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we dont seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq() do { } while (0)
+#else
+# define local_irq_enable_in_hardirq() local_irq_enable()
+#endif
#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context which is disabled,
+ * and which is the only irq-context user of a lock,
+ * that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+ disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+ local_irq_enable();
+#endif
+ enable_irq(irq);
+}
+
/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);
@@ -99,7 +152,19 @@ static inline int disable_irq_wake(unsigned int irq)
return set_irq_wake(irq, 0);
}
-#endif
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq) disable_irq(irq)
+# define enable_irq_lockdep(irq) enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
@@ -135,13 +200,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
-/* SoftIRQ primitives. */
-#define local_bh_disable() \
- do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
- do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
frequency threaded job scheduling. For almost all the purposes
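A hedged sketch of the new lockdep-aware helpers in use (illustrative only), for a driver whose lock is only ever taken from its own interrupt handler; this mirrors the pattern the comment above describes.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Illustrative sketch: with the device's interrupt disabled, its
 * irq-context-only lock may be taken without disabling hardirqs; the
 * _lockdep variants keep the lock validator informed. */
static void example_sync_with_handler(unsigned int irq, spinlock_t *lock)
{
	disable_irq_nosync_lockdep(irq);
	spin_lock(lock);
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock(lock);
	enable_irq_lockdep(irq);
}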
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 87a9fc039b4..d42c8339907 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -55,6 +55,7 @@ struct resource_list {
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+#define IORESOURCE_IRQ_SHAREABLE (1<<4)
/* ISA PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -96,7 +97,7 @@ extern struct resource iomem_resource;
extern int request_resource(struct resource *root, struct resource *new);
extern struct resource * ____request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
-extern __deprecated_for_modules int insert_resource(struct resource *parent, struct resource *new);
+extern int insert_resource(struct resource *parent, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 88d5961f7a3..8e2042b9d47 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task)
/*
* For inheritance, return the highest of the two given priorities
*/
-static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
-{
- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
-
- if (!ioprio_valid(aprio))
- return bprio;
- if (!ioprio_valid(bprio))
- return aprio;
-
- if (aclass == IOPRIO_CLASS_NONE)
- aclass = IOPRIO_CLASS_BE;
- if (bclass == IOPRIO_CLASS_NONE)
- bclass = IOPRIO_CLASS_BE;
-
- if (aclass == bclass)
- return min(aprio, bprio);
- if (aclass > bclass)
- return bprio;
- else
- return aprio;
-}
+extern int ioprio_best(unsigned short aprio, unsigned short bprio);
#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b48eae32dc6..fbf6d901e9c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -47,8 +47,8 @@
#define IRQ_WAITING 0x00200000 /* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL 0x00400000 /* IRQ level triggered */
#define IRQ_MASKED 0x00800000 /* IRQ masked - shouldn't be seen again */
+#define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
#ifdef CONFIG_IRQ_PER_CPU
-# define IRQ_PER_CPU 0x01000000 /* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
@@ -58,6 +58,7 @@
#define IRQ_NOREQUEST 0x04000000 /* IRQ cannot be requested */
#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
+#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
struct proc_dir_entry;
@@ -124,6 +125,7 @@ struct irq_chip {
* @action: the irq action chain
* @status: status information
* @depth: disable-depth, for nested irq_disable() calls
+ * @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
@@ -147,6 +149,7 @@ struct irq_desc {
unsigned int status; /* IRQ status */
unsigned int depth; /* nested irq disables */
+ unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
unsigned int irqs_unhandled;
spinlock_t lock;
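
The new wake_depth counter lets several callers mark the same IRQ as a wakeup source without stepping on each other. A hedged sketch of the expected pattern, assuming the enable_irq_wake()/disable_irq_wake() wrappers from <linux/interrupt.h> and a placeholder IRQ number:

#include <linux/interrupt.h>

#define EXAMPLE_WAKE_IRQ 7        /* placeholder IRQ number */

/* hypothetical driver suspend/resume hooks */
static int example_suspend(void)
{
        /* bumps wake_depth; a second caller nests instead of colliding */
        return enable_irq_wake(EXAMPLE_WAKE_IRQ);
}

static int example_resume(void)
{
        /* drops wake_depth; the last caller clears the wakeup state */
        return disable_irq_wake(EXAMPLE_WAKE_IRQ);
}
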
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 00000000000..412e025bc5c
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,96 @@
+/*
+ * include/linux/irqflags.h
+ *
+ * IRQ flags tracing: follow the state of the hardirq and softirq flags and
+ * provide callbacks for transitions between ON and OFF states.
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _LINUX_TRACE_IRQFLAGS_H
+#define _LINUX_TRACE_IRQFLAGS_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ extern void trace_hardirqs_on(void);
+ extern void trace_hardirqs_off(void);
+ extern void trace_softirqs_on(unsigned long ip);
+ extern void trace_softirqs_off(unsigned long ip);
+# define trace_hardirq_context(p) ((p)->hardirq_context)
+# define trace_softirq_context(p) ((p)->softirq_context)
+# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
+# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define trace_softirq_enter() do { current->softirq_context++; } while (0)
+# define trace_softirq_exit() do { current->softirq_context--; } while (0)
+# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+#else
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define trace_softirqs_on(ip) do { } while (0)
+# define trace_softirqs_off(ip) do { } while (0)
+# define trace_hardirq_context(p) 0
+# define trace_softirq_context(p) 0
+# define trace_hardirqs_enabled(p) 0
+# define trace_softirqs_enabled(p) 0
+# define trace_hardirq_enter() do { } while (0)
+# define trace_hardirq_exit() do { } while (0)
+# define trace_softirq_enter() do { } while (0)
+# define trace_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+
+#include <asm/irqflags.h>
+
+#define local_irq_enable() \
+ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+#define local_irq_disable() \
+ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_save(flags) \
+ do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
+
+#define local_irq_restore(flags) \
+ do { \
+ if (raw_irqs_disabled_flags(flags)) { \
+ raw_local_irq_restore(flags); \
+ trace_hardirqs_off(); \
+ } else { \
+ trace_hardirqs_on(); \
+ raw_local_irq_restore(flags); \
+ } \
+ } while (0)
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq_*()
+ * if !TRACE_IRQFLAGS.
+ */
+# define raw_local_irq_disable() local_irq_disable()
+# define raw_local_irq_enable() local_irq_enable()
+# define raw_local_irq_save(flags) local_irq_save(flags)
+# define raw_local_irq_restore(flags) local_irq_restore(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define safe_halt() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_safe_halt(); \
+ } while (0)
+
+#define local_save_flags(flags) raw_local_save_flags(flags)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ \
+ raw_local_save_flags(flags); \
+ raw_irqs_disabled_flags(flags); \
+})
+
+#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#endif
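
Under CONFIG_TRACE_IRQFLAGS_SUPPORT the familiar local_irq_*() macros become tracing wrappers around raw_local_irq_*(), so existing critical sections pick up the hardirq on/off events without changes. A minimal sketch with a hypothetical piece of state:

#include <linux/interrupt.h>

static unsigned long example_shared_state;        /* hypothetical data */

static void example_update_state(unsigned long val)
{
        unsigned long flags;

        /*
         * local_irq_save() disables interrupts and, with tracing on,
         * calls trace_hardirqs_off(); the restore side announces the
         * ON transition before actually re-enabling interrupts.
         */
        local_irq_save(flags);
        example_shared_state = val;
        local_irq_restore(flags);
}
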
diff --git a/include/linux/isdn/Kbuild b/include/linux/isdn/Kbuild
new file mode 100644
index 00000000000..991cdb29ab2
--- /dev/null
+++ b/include/linux/isdn/Kbuild
@@ -0,0 +1 @@
+header-y += capicmd.h
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 20eb34403d0..a04c154c520 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -72,6 +72,9 @@ extern int journal_enable_debug;
#endif
extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
+extern void * jbd_slab_alloc(size_t size, gfp_t flags);
+extern void jbd_slab_free(void *ptr, size_t size);
+
#define jbd_kmalloc(size, flags) \
__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
#define jbd_rep_kmalloc(size, flags) \
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 043376920f5..329ebcffa10 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -47,8 +47,8 @@
* - (NOM / DEN) fits in (32 - LSH) bits.
* - (NOM % DEN) fits in (32 - LSH) bits.
*/
-#define SH_DIV(NOM,DEN,LSH) ( ((NOM / DEN) << LSH) \
- + (((NOM % DEN) << LSH) + DEN / 2) / DEN)
+#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
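
The added parentheses matter as soon as SH_DIV() is given expressions instead of bare constants. A self-contained userspace check, with the macro body copied from the hunk above and purely illustrative numbers:

#include <stdio.h>

/* copied from the corrected definition above */
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
        + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

int main(void)
{
        /* ACTHZ-style use: 1193182 / 11932, scaled by 2^8 and rounded */
        printf("%lu\n", (unsigned long)SH_DIV(1193182UL, 11932UL, 8));
        /* expression arguments now expand safely, e.g. DEN == 2 + 2 */
        printf("%lu\n", (unsigned long)SH_DIV(10UL, 2 + 2, 4));
        return 0;
}
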
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 54e2549f96b..849043ce4ed 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -57,10 +57,25 @@ do { \
#define print_fn_descriptor_symbol(fmt, addr) print_symbol(fmt, addr)
#endif
-#define print_symbol(fmt, addr) \
-do { \
- __check_printsym_format(fmt, ""); \
- __print_symbol(fmt, addr); \
+static inline void print_symbol(const char *fmt, unsigned long addr)
+{
+ __check_printsym_format(fmt, "");
+ __print_symbol(fmt, (unsigned long)
+ __builtin_extract_return_addr((void *)addr));
+}
+
+#ifndef CONFIG_64BIT
+#define print_ip_sym(ip) \
+do { \
+ printk("[<%08lx>]", ip); \
+ print_symbol(" %s\n", ip); \
} while(0)
+#else
+#define print_ip_sym(ip) \
+do { \
+ printk("[<%016lx>]", ip); \
+ print_symbol(" %s\n", ip); \
+} while(0)
+#endif
#endif /*_LINUX_KALLSYMS_H*/
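
print_ip_sym() combines the width-correct address format with the symbol lookup. A tiny sketch of the intended debugging use; the helper name is hypothetical:

#include <linux/kernel.h>
#include <linux/kallsyms.h>

static void example_report_caller(void)
{
        /* prints e.g. "[<c0123456>] some_function+0x12/0x40" */
        print_ip_sym((unsigned long)__builtin_return_address(0));
}
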
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5c1ec1f84ea..851aa1bcfc1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -33,6 +33,7 @@ extern const char linux_banner[];
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
@@ -209,6 +210,7 @@ extern enum system_states {
extern void dump_stack(void);
#ifdef DEBUG
+/* If you are writing a driver, please use dev_dbg instead */
#define pr_debug(fmt,arg...) \
printk(KERN_DEBUG fmt,##arg)
#else
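
roundup() rounds x up to the next multiple of y with integer arithmetic only. A quick standalone check, macro copied from the hunk above:

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        printf("%d\n", roundup(10, 8));   /* 16 */
        printf("%d\n", roundup(16, 8));   /* 16: already aligned */
        printf("%d\n", roundup(1, 512));  /* 512 */
        return 0;
}
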
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 0503b2ed8ba..2d229327959 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -46,8 +46,6 @@ enum kobject_action {
KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
- KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */
- KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */
};
struct kobject {
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 7cce5dfa092..1c65e7a9f18 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -28,7 +28,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
-int kthread_stop_sem(struct task_struct *k, struct semaphore *s);
int kthread_should_stop(void);
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index ed3396dcc4f..84eeecd60a0 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -56,7 +56,8 @@ typedef union {
#endif
} ktime_t;
-#define KTIME_MAX (~((u64)1 << 63))
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
/*
* ktime_t definitions when using the 64-bit scalar representation:
@@ -73,6 +74,10 @@ typedef union {
*/
static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
{
+#if (BITS_PER_LONG == 64)
+ if (unlikely(secs >= KTIME_SEC_MAX))
+ return (ktime_t){ .tv64 = KTIME_MAX };
+#endif
return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
}
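
On 64-bit builds ktime_set() now saturates at KTIME_MAX instead of overflowing the scalar representation. A hedged sketch of a caller that benefits; the helper is hypothetical:

#include <linux/ktime.h>

/* hypothetical helper turning a (possibly huge) second count into ktime */
static ktime_t example_secs_to_ktime(long secs)
{
        /*
         * On 64-bit, secs >= KTIME_SEC_MAX now yields KTIME_MAX rather
         * than a wrapped-around tv64, so later comparisons stay sane.
         */
        return ktime_set(secs, 0);
}
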
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f4284bf8975..66c3100c2b9 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -131,6 +131,7 @@ enum {
ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
+ ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */
ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
ATA_DFLAG_DETACH = (1 << 16),
@@ -160,22 +161,28 @@ enum {
ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
* Register FIS clearing BSY */
-
ATA_FLAG_DEBUGMSG = (1 << 13),
- ATA_FLAG_FLUSH_PORT_TASK = (1 << 14), /* flush port task */
- ATA_FLAG_EH_PENDING = (1 << 15), /* EH pending */
- ATA_FLAG_EH_IN_PROGRESS = (1 << 16), /* EH in progress */
- ATA_FLAG_FROZEN = (1 << 17), /* port is frozen */
- ATA_FLAG_RECOVERED = (1 << 18), /* recovery action performed */
- ATA_FLAG_LOADING = (1 << 19), /* boot/loading probe */
- ATA_FLAG_UNLOADING = (1 << 20), /* module is unloading */
- ATA_FLAG_SCSI_HOTPLUG = (1 << 21), /* SCSI hotplug scheduled */
+ /* The following flag belongs to ap->pflags but is kept in
+ * ap->flags because it's referenced in many LLDs and will be
+ * removed in the not-too-distant future.
+ */
+ ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
+
+ /* bits 24:31 of ap->flags are reserved for LLD specific flags */
- ATA_FLAG_DISABLED = (1 << 22), /* port is disabled, ignore it */
- ATA_FLAG_SUSPENDED = (1 << 23), /* port is suspended (power) */
+ /* struct ata_port pflags */
+ ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
+ ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
+ ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
+ ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
+ ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
+ ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
+ ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
- /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
+ ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
+ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
+ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
@@ -248,15 +255,24 @@ enum {
ATA_EH_REVALIDATE = (1 << 0),
ATA_EH_SOFTRESET = (1 << 1),
ATA_EH_HARDRESET = (1 << 2),
+ ATA_EH_SUSPEND = (1 << 3),
+ ATA_EH_RESUME = (1 << 4),
+ ATA_EH_PM_FREEZE = (1 << 5),
ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
+ ATA_EH_RESUME | ATA_EH_PM_FREEZE,
/* ata_eh_info->flags */
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
+ ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
+ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
+ ATA_EHI_QUIET = (1 << 3), /* be quiet */
ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
+ ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
+
/* max repeat if error condition is still set after ->error_handler */
ATA_EH_MAX_REPEAT = 5,
@@ -486,6 +502,7 @@ struct ata_port {
const struct ata_port_operations *ops;
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
+ unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int id; /* unique id req'd by scsi midlyr */
unsigned int port_no; /* unique port #; from zero */
unsigned int hard_port_no; /* hardware port #; from zero */
@@ -535,6 +552,9 @@ struct ata_port {
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
+ pm_message_t pm_mesg;
+ int *pm_result;
+
void *private_data;
u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
@@ -589,6 +609,9 @@ struct ata_port_operations {
void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
u32 val);
+ int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
+ int (*port_resume) (struct ata_port *ap);
+
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
@@ -622,9 +645,18 @@ struct ata_timing {
#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
-extern const unsigned long sata_deb_timing_boot[];
-extern const unsigned long sata_deb_timing_eh[];
-extern const unsigned long sata_deb_timing_before_fsrst[];
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
@@ -644,6 +676,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
+extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
+extern void ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
@@ -664,8 +698,9 @@ extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
-extern int ata_device_resume(struct ata_device *);
-extern int ata_device_suspend(struct ata_device *, pm_message_t state);
+extern int ata_host_set_suspend(struct ata_host_set *host_set,
+ pm_message_t mesg);
+extern void ata_host_set_resume(struct ata_host_set *host_set);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
@@ -825,19 +860,24 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
(ehi)->desc_len = 0; \
} while (0)
-static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
+static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
if (ehi->flags & ATA_EHI_HOTPLUGGED)
return;
- ehi->flags |= ATA_EHI_HOTPLUGGED;
+ ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
ehi->hotplug_timestamp = jiffies;
- ehi->err_mask |= AC_ERR_ATA_BUS;
ehi->action |= ATA_EH_SOFTRESET;
ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}
+static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
+{
+ __ata_ehi_hotplugged(ehi);
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+}
+
/*
* qc helpers
*/
@@ -921,6 +961,11 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
return ata_class_absent(dev->class);
}
+static inline unsigned int ata_dev_ready(const struct ata_device *dev)
+{
+ return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
+}
+
/*
* port helpers
*/
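
The flags/pflags split and the EH hotplug helpers are consumed mainly from low-level driver interrupt paths. A heavily hedged sketch of how an LLD might report a hot-plug event with the helpers above; the handler name is a placeholder and ata_port_freeze() is assumed to be the existing EH entry point:

#include <linux/libata.h>

/* hypothetical fragment of an LLD's hotplug interrupt handler */
static void example_sata_hotplug_irq(struct ata_port *ap)
{
        struct ata_eh_info *ehi = &ap->eh_info;

        /*
         * Record the possible hot-plug: sets ATA_EHI_HOTPLUGGED and
         * ATA_EHI_RESUME_LINK, requests a softreset and widens the
         * probe mask; the non-__ variant also reports AC_ERR_ATA_BUS.
         */
        ata_ehi_hotplugged(ehi);

        /* hand the port to EH (assumed existing entry point) */
        ata_port_freeze(ap);
}
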
diff --git a/include/linux/list.h b/include/linux/list.h
index 6b74adf5297..65a5b5ceda4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -265,6 +265,17 @@ static inline void list_move_tail(struct list_head *list,
}
/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
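
list_is_last() is the natural test when a list walk needs to special-case the final entry. A minimal sketch with a hypothetical item type:

#include <linux/list.h>

struct example_item {
        struct list_head node;
        int value;
};

static void example_walk(struct list_head *head)
{
        struct example_item *item;

        list_for_each_entry(item, head, node) {
                if (list_is_last(&item->node, head)) {
                        /* last entry: no successor to peek at */
                        break;
                }
                /* ... safe to look at the following entry here ... */
        }
}
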
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index aa4fe905bb4..0d92c468d55 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -123,7 +123,6 @@ struct nlm_block {
unsigned int b_id; /* block id */
unsigned char b_queued; /* re-queued */
unsigned char b_granted; /* VFS granted lock */
- unsigned char b_done; /* callback complete */
struct nlm_file * b_file; /* file in question */
};
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 00000000000..c040a8c969a
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,353 @@
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * see Documentation/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_H
+#define __LINUX_LOCKDEP_H
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit
+{
+ LOCK_USED = 0,
+ LOCK_USED_IN_HARDIRQ,
+ LOCK_USED_IN_SOFTIRQ,
+ LOCK_ENABLED_SOFTIRQS,
+ LOCK_ENABLED_HARDIRQS,
+ LOCK_USED_IN_HARDIRQ_READ,
+ LOCK_USED_IN_SOFTIRQ_READ,
+ LOCK_ENABLED_SOFTIRQS_READ,
+ LOCK_ENABLED_HARDIRQS_READ,
+ LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define LOCKF_USED (1 << LOCK_USED)
+#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
+#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
+#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
+
+#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
+#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
+#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
+
+#define LOCKF_ENABLED_IRQS_READ \
+ (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct list_head hash_entry;
+
+ /*
+ * global list of all lock-classes:
+ */
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[LOCK_USAGE_STATES];
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ unsigned int version;
+
+ /*
+ * Statistics counter:
+ */
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+};
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache;
+ const char *name;
+};
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+ struct list_head entry;
+ u64 chain_key;
+};
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero); here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ struct lock_class *class;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest on top of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ int irq_context;
+ int trylock;
+ int read;
+ int check;
+ int hardirqs_off;
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+extern int lockdep_internal(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key);
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map(&(lock)->dep_map, #key, key)
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map(&(lock)->dep_map, name, key)
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ * 0: exclusive (write) acquire
+ * 1: read-acquire (no recursion allowed)
+ * 2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ * 0: disabled
+ * 1: simple checks (freeing, held-at-exit-time, etc.)
+ * 2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0,
+
+#else /* !LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+static inline int lockdep_internal(void)
+{
+ return 0;
+}
+
+# define lock_acquire(l, s, t, r, c, i) do { } while (0)
+# define lock_release(l, n, i) do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_info() do { } while (0)
+# define lockdep_init_map(lock, name, key) do { (void)(key); } while (0)
+# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+ do { (void)(key); } while (0)
+# define INIT_LOCKDEP
+# define lockdep_reset() do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size) do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+#endif /* !LOCKDEP */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+extern void early_init_irq_lock_class(void);
+#else
+# define early_init_irq_lock_class() do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void early_boot_irqs_off(void);
+extern void early_boot_irqs_on(void);
+#else
+# define early_boot_irqs_off() do { } while (0)
+# define early_boot_irqs_on() do { } while (0)
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING 1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i) lock_release(l, n, i)
+#else
+# define spin_acquire(l, s, t, i) do { } while (0)
+# define spin_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
+# else
+# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwlock_acquire(l, s, t, i) do { } while (0)
+# define rwlock_acquire_read(l, s, t, i) do { } while (0)
+# define rwlock_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# else
+# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i) lock_release(l, n, i)
+#else
+# define mutex_acquire(l, s, t, i) do { } while (0)
+# define mutex_release(l, n, i) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
+# else
+# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
+# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i) lock_release(l, n, i)
+#else
+# define rwsem_acquire(l, s, t, i) do { } while (0)
+# define rwsem_acquire_read(l, s, t, i) do { } while (0)
+# define rwsem_release(l, n, i) do { } while (0)
+#endif
+
+#endif /* __LINUX_LOCKDEP_H */
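
The lock_class_key machinery is what lets the validator tell structurally identical locks apart. A hedged sketch of the common re-keying pattern with lockdep_set_class(); the device type and key name are hypothetical, and spinlocks are assumed to carry a dep_map when lockdep is enabled:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_device {
        spinlock_t lock;
        /* ... */
};

/* one static key: all example_device locks share this class */
static struct lock_class_key example_dev_lock_key;

static void example_device_init(struct example_device *dev)
{
        spin_lock_init(&dev->lock);
        /*
         * Re-key the lock: by default the class comes from the
         * spin_lock_init() call site, which can be too broad when
         * common init code sets up locks for unrelated subsystems.
         */
        lockdep_set_class(&dev->lock, &example_dev_lock_key);
}
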
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index bbc93ae217e..432b2fa2492 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -89,4 +89,11 @@ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
# define RTC_VRT 0x80 /* valid RAM and time */
/**********************************************************************/
+#ifndef ARCH_RTC_LOCATION /* Override by <asm/mc146818rtc.h>? */
+
+#define RTC_IO_EXTENT 0x8
+#define RTC_IOMAPPED 1 /* Default to I/O mapping. */
+
+#endif /* ARCH_RTC_LOCATION */
+
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 75179529e39..f0b135cd86d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -14,6 +14,7 @@
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/mutex.h>
+#include <linux/debug_locks.h>
struct mempolicy;
struct anon_vma;
@@ -335,6 +336,7 @@ static inline void init_page_count(struct page *page)
}
void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
@@ -1034,13 +1036,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */
-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
- mutex_debug_check_no_locks_freed(from, len);
- rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c1f021eddff..ba095aebedf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -77,7 +77,7 @@ struct mmc_host {
struct device *dev;
struct class_device class_dev;
int index;
- struct mmc_host_ops *ops;
+ const struct mmc_host_ops *ops;
unsigned int f_min;
unsigned int f_max;
u32 ocr_avail;
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 03a14a30c46..627e2c08ce4 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -105,6 +105,8 @@ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int,
struct mmc_command *, int);
+extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *, int);
+
extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card);
static inline void mmc_claim_host(struct mmc_host *host)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 27e748eb72b..f45163c528e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -77,6 +77,7 @@ struct per_cpu_pages {
struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
#ifdef CONFIG_SMP
+ s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;
@@ -150,6 +151,10 @@ struct zone {
unsigned long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
+ /*
+ * zone reclaim becomes active if more unmapped pages exist.
+ */
+ unsigned long min_unmapped_ratio;
struct per_cpu_pageset *pageset[NR_CPUS];
#else
struct per_cpu_pageset pageset[NR_CPUS];
@@ -414,6 +419,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ struct file *, void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9e9dc7c24d9..0dfb794c52d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -358,13 +358,12 @@ static inline int module_is_live(struct module *mod)
/* Is this address in a module? (second is with no locks, for oops) */
struct module *module_text_address(unsigned long addr);
struct module *__module_text_address(unsigned long addr);
+int is_module_address(unsigned long addr);
/* Returns module and fills in value, defined and namebuf, or NULL if
symnum out of range. */
-struct module *module_get_kallsym(unsigned int symnum,
- unsigned long *value,
- char *type,
- char namebuf[128]);
+struct module *module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name, size_t namelen);
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
@@ -496,6 +495,11 @@ static inline struct module *__module_text_address(unsigned long addr)
return NULL;
}
+static inline int is_module_address(unsigned long addr)
+{
+ return 0;
+}
+
/* Get/put a kernel symbol (calls should be symmetric) */
#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
#define symbol_put(x) do { } while(0)
@@ -529,8 +533,8 @@ static inline const char *module_address_lookup(unsigned long addr,
static inline struct module *module_get_kallsym(unsigned int symnum,
unsigned long *value,
- char *type,
- char namebuf[128])
+ char *type, char *name,
+ size_t namelen)
{
return NULL;
}
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 7a7fbe87fef..1221b7c4415 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,21 +19,21 @@
/**
* struct nand_bbt_descr - bad block table descriptor
- * @param options options for this descriptor
- * @param pages the page(s) where we find the bbt, used with
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with
* option BBT_ABSPAGE when bbt is searched,
* then we store the found bbts pages here.
* It's an array and supports up to 8 chips now
- * @param offs offset of the pattern in the oob area of the page
- * @param veroffs offset of the bbt version counter in the oob are of the page
- * @param version version read from the bbt page during scan
- * @param len length of the pattern, if 0 no pattern check is performed
- * @param maxblocks maximum number of blocks to search for a bbt. This number of
- * blocks is reserved at the end of the device
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This
+ * number of blocks is reserved at the end of the device
* where the tables are written.
- * @param reserved_block_code if non-0, this pattern denotes a reserved
+ * @reserved_block_code: if non-0, this pattern denotes a reserved
* (rather than bad) block in the stored bbt
- * @param pattern pattern to identify bad block table or factory marked
+ * @pattern: pattern to identify bad block table or factory marked
* good / bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
@@ -93,12 +93,15 @@ struct nand_bbt_descr {
#define ONENAND_BADBLOCK_POS 0
/**
- * struct bbt_info - [GENERIC] Bad Block Table data structure
- * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry
- * @param badblockpos [INTERN] position of the bad block marker in the oob area
- * @param bbt [INTERN] bad block table pointer
- * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan
- * @param priv [OPTIONAL] pointer to private bbm date
+ * struct bbm_info - [GENERIC] Bad Block Table data structure
+ * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
+ * @badblockpos: [INTERN] position of the bad block marker in the oob area
+ * @options: options for this descriptor
+ * @bbt: [INTERN] bad block table pointer
+ * @isbad_bbt: function to determine if a block is bad
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for
+ * initial bad block scan
+ * @priv: [OPTIONAL] pointer to private bbm data
*/
struct bbm_info {
int bbt_erase_shift;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 9b7a2b525d6..94a443d4525 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -77,11 +77,11 @@ typedef enum {
*
* @len: number of bytes to write/read. When a data buffer is given
* (datbuf != NULL) this is the number of data bytes. When
- + no data buffer is available this is the number of oob bytes.
+ * no data buffer is available this is the number of oob bytes.
*
* @retlen: number of bytes written/read. When a data buffer is given
* (datbuf != NULL) this is the number of data bytes. When
- + no data buffer is available this is the number of oob bytes.
+ * no data buffer is available this is the number of oob bytes.
*
* @ooblen: number of oob bytes per page
* @ooboffs: offset of oob data in the oob area (only relevant when
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 66559272ebc..0b4cd2fa64a 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -202,7 +202,7 @@ typedef enum {
struct nand_chip;
/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independend devices
+ * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
* @lock: protection lock
* @active: the mtd device which holds the controller currently
* @wq: wait queue to sleep on if a NAND operation is in progress
@@ -223,12 +223,15 @@ struct nand_hw_control {
* @total: total number of ecc bytes per page
* @prepad: padding information for syndrome based ecc generators
* @postpad: padding information for syndrome based ecc generators
+ * @layout: ECC layout control struct pointer
* @hwctl: function to control hardware ecc generator. Must only
* be provided if an hardware ECC is available
* @calculate: function for ecc calculation or readback from ecc hardware
* @correct: function for ecc correction, matching to ecc generator (sw/hw)
* @read_page: function to read a page according to the ecc generator requirements
* @write_page: function to write a page according to the ecc generator requirements
+ * @read_oob: function to read chip OOB data
+ * @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
nand_ecc_modes_t mode;
@@ -300,11 +303,15 @@ struct nand_buffers {
* @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready
* @ecc: [BOARDSPECIFIC] ecc control structure
+ * @buffers: buffer structure for read/write
+ * @hwcontrol: platform-specific hardware control structure
+ * @ops: oob operation operands
* @erase_cmd: [INTERN] erase command write function, selectable due to AND support
* @scan_bbt: [REPLACEABLE] function to scan bad block table
* @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring data from array to read regs (tR)
* @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
* @state: [INTERN] the current state of the NAND device
+ * @oob_poi: poison value buffer
* @page_shift: [INTERN] number of address bits in a page (column address bits)
* @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
* @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
@@ -400,7 +407,6 @@ struct nand_chip {
/**
* struct nand_flash_dev - NAND Flash Device ID Structure
- *
* @name: Identify the device type
* @id: device ID code
* @pagesize: Pagesize in bytes. Either 256 or 512 or 0
@@ -519,9 +525,8 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
/**
* struct platform_nand_chip - chip level device structure
- *
* @nr_chips: max. number of chips to scan for
- * @chip_offs: chip number offset
+ * @chip_offset: chip number offset
* @nr_partitions: number of partitions pointed to by partitions (or zero)
* @partitions: mtd partition list
* @chip_delay: R/B delay value in us
@@ -542,11 +547,10 @@ struct platform_nand_chip {
/**
* struct platform_nand_ctrl - controller level device structure
- *
* @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
- * @priv_data: private data to transport driver specific settings
+ * @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
*/
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9ce9a48db44..1f497215524 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -23,7 +23,7 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
/* Free resources held by the OneNAND device */
extern void onenand_release(struct mtd_info *mtd);
-/**
+/*
* onenand_state_t - chip states
* Enumeration for OneNAND flash chip state
*/
@@ -42,9 +42,9 @@ typedef enum {
/**
* struct onenand_bufferram - OneNAND BufferRAM Data
- * @param block block address in BufferRAM
- * @param page page address in BufferRAM
- * @param valid valid flag
+ * @block: block address in BufferRAM
+ * @page: page address in BufferRAM
+ * @valid: valid flag
*/
struct onenand_bufferram {
int block;
@@ -54,32 +54,43 @@ struct onenand_bufferram {
/**
* struct onenand_chip - OneNAND Private Flash Chip Data
- * @param base [BOARDSPECIFIC] address to access OneNAND
- * @param chipsize [INTERN] the size of one chip for multichip arrays
- * @param device_id [INTERN] device ID
- * @param verstion_id [INTERN] version ID
- * @param options [BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about
- * @param erase_shift [INTERN] number of address bits in a block
- * @param page_shift [INTERN] number of address bits in a page
- * @param ppb_shift [INTERN] number of address bits in a pages per block
- * @param page_mask [INTERN] a page per block mask
- * @param bufferam_index [INTERN] BufferRAM index
- * @param bufferam [INTERN] BufferRAM info
- * @param readw [REPLACEABLE] hardware specific function for read short
- * @param writew [REPLACEABLE] hardware specific function for write short
- * @param command [REPLACEABLE] hardware specific function for writing commands to the chip
- * @param wait [REPLACEABLE] hardware specific function for wait on ready
- * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area
- * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area
- * @param read_word [REPLACEABLE] hardware specific function for read register of OneNAND
- * @param write_word [REPLACEABLE] hardware specific function for write register of OneNAND
- * @param scan_bbt [REPLACEALBE] hardware specific function for scaning Bad block Table
- * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip
- * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress
- * @param state [INTERN] the current state of the OneNAND device
- * @param ecclayout [REPLACEABLE] the default ecc placement scheme
- * @param bbm [REPLACEABLE] pointer to Bad Block Management
- * @param priv [OPTIONAL] pointer to private chip date
+ * @base: [BOARDSPECIFIC] address to access OneNAND
+ * @chipsize: [INTERN] the size of one chip for multichip arrays
+ * @device_id: [INTERN] device ID
+ * @density_mask: chip density, used for DDP devices
+ * @version_id: [INTERN] version ID
+ * @options: [BOARDSPECIFIC] various chip options. They can
+ * partly be set to inform onenand_scan about
+ * @erase_shift: [INTERN] number of address bits in a block
+ * @page_shift: [INTERN] number of address bits in a page
+ * @ppb_shift: [INTERN] number of address bits in a pages per block
+ * @page_mask: [INTERN] a page per block mask
+ * @bufferram_index: [INTERN] BufferRAM index
+ * @bufferram: [INTERN] BufferRAM info
+ * @readw: [REPLACEABLE] hardware specific function for read short
+ * @writew: [REPLACEABLE] hardware specific function for write short
+ * @command: [REPLACEABLE] hardware specific function for writing
+ * commands to the chip
+ * @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @read_word: [REPLACEABLE] hardware specific function for read
+ * register of OneNAND
+ * @write_word: [REPLACEABLE] hardware specific function for write
+ * register of OneNAND
+ * @mmcontrol: sync burst read function
+ * @block_markbad: function to mark a block as bad
+ * @scan_bbt: [REPLACEABLE] hardware specific function for scanning
+ * Bad block Table
+ * @chip_lock: [INTERN] spinlock used to protect access to this
+ * structure and the chip
+ * @wq: [INTERN] wait queue to sleep on if a OneNAND
+ * operation is in progress
+ * @state: [INTERN] the current state of the OneNAND device
+ * @page_buf: data buffer
+ * @ecclayout: [REPLACEABLE] the default ecc placement scheme
+ * @bbm: [REPLACEABLE] pointer to Bad Block Management
+ * @priv: [OPTIONAL] pointer to private chip data
*/
struct onenand_chip {
void __iomem *base;
@@ -147,9 +158,9 @@ struct onenand_chip {
#define ONENAND_MFR_SAMSUNG 0xec
/**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
- * @param name: Manufacturer name
- * @param id: manufacturer ID code of device.
+ * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
*/
struct onenand_manufacturers {
int id;
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 8b5769f0046..2537285e106 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -2,22 +2,22 @@
#define __LINUX_MUTEX_DEBUG_H
#include <linux/linkage.h>
+#include <linux/lockdep.h>
/*
* Mutexes - debugging helpers:
*/
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
- , .held_list = LIST_HEAD_INIT(lockname.held_list), \
- .name = #lockname , .magic = &lockname
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
-#define mutex_init(sem) __mutex_init(sem, __FUNCTION__)
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
extern void FASTCALL(mutex_destroy(struct mutex *lock));
-extern void mutex_debug_show_all_locks(void);
-extern void mutex_debug_show_held_locks(struct task_struct *filter);
-extern void mutex_debug_check_no_locks_held(struct task_struct *task);
-extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
-
#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1ac507fa20..27c48daa318 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -13,6 +13,7 @@
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
+#include <linux/lockdep.h>
#include <asm/atomic.h>
@@ -50,11 +51,12 @@ struct mutex {
struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
struct thread_info *owner;
- struct list_head held_list;
- unsigned long acquire_ip;
const char *name;
void *magic;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
/*
@@ -74,24 +76,34 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
-# define mutex_init(mutex) __mutex_init(mutex, NULL)
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
# define mutex_destroy(mutex) do { } while (0)
-# define mutex_debug_show_all_locks() do { } while (0)
-# define mutex_debug_show_held_locks(p) do { } while (0)
-# define mutex_debug_check_no_locks_held(task) do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, len) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
, .wait_lock = SPIN_LOCK_UNLOCKED \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
- __DEBUG_MUTEX_INITIALIZER(lockname) }
+ __DEBUG_MUTEX_INITIALIZER(lockname) \
+ __DEP_MAP_MUTEX_INITIALIZER(lockname) }
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+extern void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
/***
* mutex_is_locked - is the mutex locked
@@ -110,6 +122,13 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
*/
extern void fastcall mutex_lock(struct mutex *lock);
extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+#else
+# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+#endif
+
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
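
mutex_lock_nested() with SINGLE_DEPTH_NESTING is how a caller declares that taking two mutexes of the same class in a fixed order is intentional. A hedged sketch with a hypothetical parent/child pair:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_node {
        struct mutex lock;
        struct example_node *parent;
};

static void example_lock_pair(struct example_node *node)
{
        /*
         * Both mutexes come from the same mutex_init() site and thus
         * share a lock class; annotating the inner one with subclass
         * SINGLE_DEPTH_NESTING keeps lockdep from reporting a
         * self-deadlock on the class.
         */
        mutex_lock(&node->parent->lock);
        mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);

        /* ... work on both nodes ... */

        mutex_unlock(&node->lock);
        mutex_unlock(&node->parent->lock);
}
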
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 58cb3d3d44b..45511a5918d 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -11,7 +11,7 @@ struct open_intent {
struct file *file;
};
-enum { MAX_NESTED_LINKS = 5 };
+enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
struct dentry *dentry;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 85f99f60dee..50a4719512e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -320,6 +320,9 @@ struct net_device
#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+ /* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -549,6 +552,7 @@ struct packet_type {
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
+ int (*gso_send_check)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
@@ -923,10 +927,10 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
static inline int netif_tx_trylock(struct net_device *dev)
{
- int err = spin_trylock(&dev->_xmit_lock);
- if (!err)
+ int ok = spin_trylock(&dev->_xmit_lock);
+ if (likely(ok))
dev->xmit_lock_owner = smp_processor_id();
- return err;
+ return ok;
}
static inline void netif_tx_unlock(struct net_device *dev)
@@ -1001,13 +1005,38 @@ static inline int net_gso_ok(int features, int gso_type)
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
- return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
- skb_shinfo(skb)->gso_type : 0);
+ return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
- return !skb_gso_ok(skb, dev->features);
+ return skb_is_gso(skb) &&
+ (!skb_gso_ok(skb, dev->features) ||
+ unlikely(skb->ip_summed != CHECKSUM_HW));
+}
+
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
+ */
+static inline int skb_bond_should_drop(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct net_device *master = dev->master;
+
+ if (master &&
+ (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+ if (master->priv_flags & IFF_MASTER_ALB) {
+ if (skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return 0;
+ }
+ if (master->priv_flags & IFF_MASTER_8023AD &&
+ skb->protocol == __constant_htons(ETH_P_SLOW))
+ return 0;
+
+ return 1;
+ }
+ return 0;
}
#endif /* __KERNEL__ */
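
netif_tx_trylock() now returns non-zero on success rather than a spin_trylock()-style error code, which reads naturally at call sites. A short sketch of the intended pattern; the work done under the lock is only illustrative:

#include <linux/netdevice.h>

static void example_try_restart_queue(struct net_device *dev)
{
        /* returns non-zero when the xmit lock was taken */
        if (netif_tx_trylock(dev)) {
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
                netif_tx_unlock(dev);
        }
}
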
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
new file mode 100644
index 00000000000..d06311acd44
--- /dev/null
+++ b/include/linux/netfilter/Kbuild
@@ -0,0 +1,11 @@
+header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \
+ nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \
+ xt_CLASSIFY.h xt_comment.h xt_connbytes.h xt_connmark.h \
+ xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \
+ xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \
+ xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \
+ xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \
+ xt_tcpmss.h xt_tcpudp.h
+
+unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \
+ nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h
diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild
new file mode 100644
index 00000000000..198ec5e7b17
--- /dev/null
+++ b/include/linux/netfilter_arp/Kbuild
@@ -0,0 +1,2 @@
+header-y := arpt_mangle.h
+unifdef-y := arp_tables.h
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 87764022cc6..427c67ff89e 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -6,7 +6,6 @@
#include <linux/netfilter.h>
#if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER)
-#include <asm/atomic.h>
#include <linux/if_ether.h>
#endif
@@ -49,15 +48,25 @@ enum nf_br_hook_priorities {
/* Only used in br_forward.c */
static inline
-void nf_bridge_maybe_copy_header(struct sk_buff *skb)
+int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
+ int err;
+
if (skb->nf_bridge) {
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+ err = skb_cow(skb, 18);
+ if (err)
+ return err;
memcpy(skb->data - 18, skb->nf_bridge->data, 18);
skb_push(skb, 4);
- } else
+ } else {
+ err = skb_cow(skb, 16);
+ if (err)
+ return err;
memcpy(skb->data - 16, skb->nf_bridge->data, 16);
+ }
}
+ return 0;
}
/* This is called by the IP fragmenting code and it ensures there is
@@ -79,6 +88,8 @@ struct bridge_skb_cb {
__u32 ipv4;
} daddr;
};
+
+extern int brnf_deferred_hooks;
#endif /* CONFIG_BRIDGE_NETFILTER */
#endif /* __KERNEL__ */
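
Because nf_bridge_maybe_copy_header() may now have to grow headroom via skb_cow(), it can fail and the caller must propagate the error. A hedged sketch of the resulting shape in a forwarding path; the function name is a placeholder:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>

static int example_bridge_forward_finish(struct sk_buff *skb)
{
        /* restore the saved ethernet/VLAN header before transmit */
        if (nf_bridge_maybe_copy_header(skb)) {
                /* skb_cow() failed: drop rather than send a bad frame */
                kfree_skb(skb);
                return -ENOMEM;
        }

        /* ... hand the skb to dev_queue_xmit() or similar ... */
        return 0;
}
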
diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild
new file mode 100644
index 00000000000..5b1aba6abba
--- /dev/null
+++ b/include/linux/netfilter_bridge/Kbuild
@@ -0,0 +1,4 @@
+header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \
+ ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \
+ ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h
+unifdef-y := ebtables.h ebt_802_3.h
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
new file mode 100644
index 00000000000..04e4d272168
--- /dev/null
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -0,0 +1,21 @@
+
+header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \
+ ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \
+ ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \
+ ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \
+ ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \
+ ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \
+ ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \
+ ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \
+ ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \
+ ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \
+ ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \
+ ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \
+ ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \
+ ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \
+ ipt_ULOG.h
+
+unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \
+ ip_conntrack_pptp.h ip_conntrack_proto_gre.h \
+ ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \
+ ip_tables.h
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
new file mode 100644
index 00000000000..913ddbf55b4
--- /dev/null
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -0,0 +1,6 @@
+header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \
+ ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \
+ ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \
+ ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \
+ ip6t_physdev.h ip6t_rt.h
+unifdef-y := ip6_tables.h
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 5f681d53429..db05182ca0e 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -157,6 +157,12 @@ enum nfs_opnum4 {
OP_ILLEGAL = 10044,
};
+/*
+ * First and last NFS4 operations implemented; needs to be updated if
+ * more operations are defined in the future.
+ */
+
+#define FIRST_NFS4_OP OP_ACCESS
+#define LAST_NFS4_OP OP_RELEASE_LOCKOWNER
+
enum nfsstat4 {
NFS4_OK = 0,
NFS4ERR_PERM = 1,
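
FIRST_NFS4_OP and LAST_NFS4_OP bound the implemented operation numbers, which makes range checks and per-op tables straightforward. A tiny, purely illustrative sketch:

#include <linux/types.h>
#include <linux/nfs4.h>

static int example_nfs4_op_supported(u32 op)
{
        /* holds only while the two markers are kept up to date */
        return op >= FIRST_NFS4_OP && op <= LAST_NFS4_OP;
}
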
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 55ea853d57b..6c2066caeaa 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -9,27 +9,6 @@
#ifndef _LINUX_NFS_FS_H
#define _LINUX_NFS_FS_H
-#include <linux/in.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <linux/wait.h>
-
-#include <linux/sunrpc/debug.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/sunrpc/clnt.h>
-
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-#include <linux/nfs_xdr.h>
-
-#include <linux/nfs_fs_sb.h>
-
-#include <linux/rwsem.h>
-#include <linux/mempool.h>
-
/*
* Enable debugging support for nfs client.
* Requires RPC_DEBUG.
@@ -48,11 +27,6 @@
#define NFS_SUPER_MAGIC 0x6969
/*
- * These are the default flags for swap requests
- */
-#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
-
-/*
* When flushing a cluster of dirty pages, there can be different
* strategies:
*/
@@ -65,6 +39,32 @@
#ifdef __KERNEL__
+#include <linux/in.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <linux/wait.h>
+
+#include <linux/sunrpc/debug.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_xdr.h>
+
+#include <linux/nfs_fs_sb.h>
+
+#include <linux/rwsem.h>
+#include <linux/mempool.h>
+
+/*
+ * These are the default flags for swap requests
+ */
+#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+
/*
* NFSv3/v4 Access mode cache entry
*/
@@ -427,7 +427,7 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
extern void nfs_writedata_release(void *);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount);
+struct nfs_write_data *nfs_commit_alloc(void);
void nfs_commit_free(struct nfs_write_data *p);
#endif
@@ -476,10 +476,9 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
}
/*
- * Allocate and free nfs_write_data structures
+ * Allocate nfs_write_data structures
*/
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
-extern void nfs_writedata_free(struct nfs_write_data *p);
+extern struct nfs_write_data *nfs_writedata_alloc(size_t len);
/*
* linux/fs/nfs/read.c
@@ -491,10 +490,9 @@ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
extern void nfs_readdata_release(void *data);
/*
- * Allocate and free nfs_read_data structures
+ * Allocate nfs_read_data structures
*/
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
-extern void nfs_readdata_free(struct nfs_read_data *p);
+extern struct nfs_read_data *nfs_readdata_alloc(size_t len);
/*
* linux/fs/nfs3proc.c
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7c7320fa51a..41e5a19199e 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -659,7 +659,7 @@ struct nfs4_rename_res {
struct nfs4_setclientid {
const nfs4_verifier * sc_verifier; /* request */
unsigned int sc_name_len;
- char sc_name[32]; /* request */
+ char sc_name[48]; /* request */
u32 sc_prog; /* request */
unsigned int sc_netid_len;
char sc_netid[4]; /* request */
@@ -729,6 +729,7 @@ struct nfs_read_data {
struct list_head pages; /* Coalesced read requests */
struct nfs_page *req; /* multi ops per nfs_page */
struct page **pagevec;
+ unsigned int npages; /* Max length of pagevec */
struct nfs_readargs args;
struct nfs_readres res;
#ifdef CONFIG_NFS_V4
@@ -747,6 +748,7 @@ struct nfs_write_data {
struct list_head pages; /* Coalesced requests we wish to flush */
struct nfs_page *req; /* multi ops per nfs_page */
struct page **pagevec;
+ unsigned int npages; /* Max length of pagevec */
struct nfs_writeargs args; /* argument struct */
struct nfs_writeres res; /* result struct */
#ifdef CONFIG_NFS_V4
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
new file mode 100644
index 00000000000..c8c54566588
--- /dev/null
+++ b/include/linux/nfsd/Kbuild
@@ -0,0 +1,2 @@
+unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h
+
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h
index b6f1e0cda4f..28a82fdd922 100644
--- a/include/linux/nfsd/stats.h
+++ b/include/linux/nfsd/stats.h
@@ -9,6 +9,8 @@
#ifndef LINUX_NFSD_STATS_H
#define LINUX_NFSD_STATS_H
+#include <linux/nfs4.h>
+
struct nfsd_stats {
unsigned int rchits; /* repcache hits */
unsigned int rcmisses; /* repcache misses */
@@ -27,6 +29,10 @@ struct nfsd_stats {
unsigned int ra_size; /* size of ra cache */
unsigned int ra_depth[11]; /* number of times ra entry was found that deep
* in the cache (10 percentiles). [10] = not found */
+#ifdef CONFIG_NFSD_V4
+ unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */
+#endif
+
};
/* thread usage wraps every million seconds (approx one fortnight) */
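
A hedged sketch of why stats.h now pulls in <linux/nfs4.h>: the new nfs4_opcount[] array is dimensioned with LAST_NFS4_OP, so a per-operation counter can be bumped with bounds taken from the FIRST_NFS4_OP/LAST_NFS4_OP pair defined above. example_count_nfs4_op() is illustrative, not the nfsd implementation.

static inline void example_count_nfs4_op(struct nfsd_stats *st, u32 opnum)
{
#ifdef CONFIG_NFSD_V4
	if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
		st->nfs4_opcount[opnum]++;
#endif
}
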
diff --git a/include/linux/node.h b/include/linux/node.h
index 81dcec84cd8..bc001bc225c 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -30,12 +30,20 @@ extern struct node node_devices[];
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
+#ifdef CONFIG_NUMA
extern int register_one_node(int nid);
extern void unregister_one_node(int nid);
-#ifdef CONFIG_NUMA
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
#else
+static inline int register_one_node(int nid)
+{
+ return 0;
+}
+static inline int unregister_one_node(int nid)
+{
+ return 0;
+}
static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
return 0;
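
A hedged sketch of what the new !CONFIG_NUMA stubs buy: generic code can call register_one_node()/unregister_one_node() unconditionally, and on non-NUMA builds the calls compile away to a successful no-op. example_bring_up_node() is hypothetical.

static int example_bring_up_node(int nid)
{
	int err = register_one_node(nid);	/* real registration on NUMA, stub otherwise */

	if (err)
		return err;
	/* ... further node bring-up would continue here ... */
	return 0;
}
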
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 51dbab9710c..7ff386a6ae8 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -65,7 +65,7 @@ struct raw_notifier_head {
} while (0)
#define ATOMIC_NOTIFIER_INIT(name) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.head = NULL }
#define BLOCKING_NOTIFIER_INIT(name) { \
.rwsem = __RWSEM_INITIALIZER((name).rwsem), \
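
A hedged usage sketch: with __SPIN_LOCK_UNLOCKED(name.lock) the spinlock embedded in a statically initialized notifier head gets its own lockdep class name instead of sharing the anonymous old-style initializer. my_chain is a made-up name; the existing ATOMIC_NOTIFIER_HEAD() helper expands to essentially this definition.

static struct atomic_notifier_head my_chain = ATOMIC_NOTIFIER_INIT(my_chain);
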
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
index 135742cfada..7da0cf3702e 100644
--- a/include/linux/nsc_gpio.h
+++ b/include/linux/nsc_gpio.h
@@ -25,8 +25,6 @@ struct nsc_gpio_ops {
void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
int (*gpio_get) (unsigned iminor);
void (*gpio_set) (unsigned iminor, int state);
- void (*gpio_set_high)(unsigned iminor);
- void (*gpio_set_low) (unsigned iminor);
void (*gpio_change) (unsigned iminor);
int (*gpio_current) (unsigned iminor);
struct device* dev; /* for dev_dbg() support, set in init */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 983fca251b2..8565b81d7fb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -161,6 +161,7 @@ struct pci_dev {
unsigned int is_enabled:1; /* pci_enable_device has been called */
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
+ unsigned int no_d1d2:1; /* only allow d0 or d3 */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 685081c0134..7a249155ee4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -648,6 +648,8 @@
#define PCI_DEVICE_ID_SI_962 0x0962
#define PCI_DEVICE_ID_SI_963 0x0963
#define PCI_DEVICE_ID_SI_965 0x0965
+#define PCI_DEVICE_ID_SI_966 0x0966
+#define PCI_DEVICE_ID_SI_968 0x0968
#define PCI_DEVICE_ID_SI_5511 0x5511
#define PCI_DEVICE_ID_SI_5513 0x5513
#define PCI_DEVICE_ID_SI_5517 0x5517
@@ -1292,6 +1294,7 @@
#define PCI_DEVICE_ID_VIA_8367_0 0x3099
#define PCI_DEVICE_ID_VIA_8653_0 0x3101
#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104
#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
#define PCI_DEVICE_ID_VIA_8361 0x3112
#define PCI_DEVICE_ID_VIA_XM266 0x3116
@@ -1726,6 +1729,9 @@
#define PCI_VENDOR_ID_DOMEX 0x134a
#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
+#define PCI_VENDOR_ID_INTASHIELD 0x135a
+#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80
+
#define PCI_VENDOR_ID_QUATECH 0x135C
#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010
#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
@@ -2019,6 +2025,13 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
+#define PCI_VENDOR_ID_JMICRON 0x197B
+#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
+#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
+#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
+#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
+#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2135,6 +2148,7 @@
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
+#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 6bce4a24036..96930cb5927 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -422,7 +422,23 @@
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+/* Correctable Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001
+/* Non-fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
+/* Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004
#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
+/* Multi ERR_COR Received */
+#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
+/* ERR_FATAL/NONFATAL Received */
+#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
+/* Multi ERR_FATAL/NONFATAL Received */
+#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
+#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
+#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
+#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_COR_SRC 52
#define PCI_ERR_ROOT_SRC 54
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 331521a10a2..9447a57ee8a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -378,6 +378,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
struct mii_ioctl_data *mii_data, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
+struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
extern struct bus_type mdio_bus_type;
#endif /* __PHY_H */
diff --git a/include/linux/pm_legacy.h b/include/linux/pm_legacy.h
index 78027c533b9..514729a4468 100644
--- a/include/linux/pm_legacy.h
+++ b/include/linux/pm_legacy.h
@@ -15,11 +15,6 @@ struct pm_dev __deprecated *
pm_register(pm_dev_t type, unsigned long id, pm_callback callback);
/*
- * Unregister all devices with matching callback
- */
-void __deprecated pm_unregister_all(pm_callback callback);
-
-/*
* Send a request to all devices
*/
int __deprecated pm_send_all(pm_request_t rqst, void *data);
@@ -35,8 +30,6 @@ static inline struct pm_dev *pm_register(pm_dev_t type,
return NULL;
}
-static inline void pm_unregister_all(pm_callback callback) {}
-
static inline int pm_send_all(pm_request_t rqst, void *data)
{
return 0;
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 2ed807ddc08..783177387ac 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -231,7 +231,6 @@ extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
extern unsigned int pmu_power_flags;
/* Backlight */
-extern int disable_kernel_backlight;
-extern void pmu_backlight_init(struct device_node*);
+extern void pmu_backlight_init(void);
#endif /* __KERNEL__ */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a5347c02432..3e628f990fd 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -44,6 +44,11 @@
/********** drivers/atm/ **********/
#define ATM_POISON_FREE 0x12
+#define ATM_POISON 0xdeadbeef
+
+/********** net/ **********/
+#define NEIGHBOR_DEAD 0xdeadbeef
+#define NETFILTER_LINK_POISON 0xdead57ac
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild
new file mode 100644
index 00000000000..73fa27a8d55
--- /dev/null
+++ b/include/linux/raid/Kbuild
@@ -0,0 +1 @@
+header-y += md_p.h md_u.h
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index c1e0ac55bab..d2889029585 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -148,9 +148,10 @@ struct mddev_s
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
- sector_t curr_resync; /* blocks scheduled */
+ sector_t curr_resync; /* last block scheduled */
unsigned long resync_mark; /* a recent timestamp */
sector_t resync_mark_cnt;/* blocks written at resync_mark */
+ sector_t curr_mark_cnt; /* blocks scheduled now */
sector_t resync_max_sectors; /* may be set by personality */
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index ea4bc9d1373..ed241aad7c1 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -2,6 +2,8 @@
#define _ROOT_DEV_H_
#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa4a3b82ba7..5d41dee82f8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -29,8 +29,6 @@ struct rt_mutex {
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
- struct list_head held_list_entry;
- unsigned long acquire_ip;
const char *name, *file;
int line;
void *magic;
@@ -98,14 +96,6 @@ extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# define INIT_RT_MUTEX_DEBUG(tsk) \
- .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
- .held_list_lock = SPIN_LOCK_UNLOCKED
-#else
-# define INIT_RT_MUTEX_DEBUG(tsk)
-#endif
-
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index f30f805080a..ae1fcadd598 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,30 +32,37 @@ struct rw_semaphore {
__s32 activity;
spinlock_t wait_lock;
struct list_head wait_list;
-#if RWSEM_DEBUG
- int debug;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
#endif
};
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT , 0
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
-#define __RWSEM_DEBUG_INIT /* */
+# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
extern void FASTCALL(__down_read(struct rw_semaphore *sem));
extern int FASTCALL(__down_read_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern void FASTCALL(__down_write_nested(struct rw_semaphore *sem, int subclass));
extern int FASTCALL(__down_write_trylock(struct rw_semaphore *sem));
extern void FASTCALL(__up_read(struct rw_semaphore *sem));
extern void FASTCALL(__up_write(struct rw_semaphore *sem));
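
A hedged illustration of the new init_rwsem(): because the macro plants one static struct lock_class_key per call site, the two semaphores below land in two distinct lockdep classes, while every object initialized through a single helper shares that helper's class. struct example_object is invented.

struct example_object {
	struct rw_semaphore a_sem;
	struct rw_semaphore b_sem;
};

static void example_object_init(struct example_object *obj)
{
	init_rwsem(&obj->a_sem);	/* class keyed to this call site */
	init_rwsem(&obj->b_sem);	/* a second, distinct class */
}
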
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f99fe90732a..7b524b4109a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -9,8 +9,6 @@
#include <linux/linkage.h>
-#define RWSEM_DEBUG 0
-
#ifdef __KERNEL__
#include <linux/types.h>
@@ -26,89 +24,71 @@ struct rw_semaphore;
#include <asm/rwsem.h> /* use an arch-specific implementation */
#endif
-#ifndef rwsemtrace
-#if RWSEM_DEBUG
-extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
-#else
-#define rwsemtrace(SEM,FMT)
-#endif
-#endif
-
/*
* lock for reading
*/
-static inline void down_read(struct rw_semaphore *sem)
-{
- might_sleep();
- rwsemtrace(sem,"Entering down_read");
- __down_read(sem);
- rwsemtrace(sem,"Leaving down_read");
-}
+extern void down_read(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
- int ret;
- rwsemtrace(sem,"Entering down_read_trylock");
- ret = __down_read_trylock(sem);
- rwsemtrace(sem,"Leaving down_read_trylock");
- return ret;
-}
+extern int down_read_trylock(struct rw_semaphore *sem);
/*
* lock for writing
*/
-static inline void down_write(struct rw_semaphore *sem)
-{
- might_sleep();
- rwsemtrace(sem,"Entering down_write");
- __down_write(sem);
- rwsemtrace(sem,"Leaving down_write");
-}
+extern void down_write(struct rw_semaphore *sem);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
- int ret;
- rwsemtrace(sem,"Entering down_write_trylock");
- ret = __down_write_trylock(sem);
- rwsemtrace(sem,"Leaving down_write_trylock");
- return ret;
-}
+extern int down_write_trylock(struct rw_semaphore *sem);
/*
* release a read lock
*/
-static inline void up_read(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering up_read");
- __up_read(sem);
- rwsemtrace(sem,"Leaving up_read");
-}
+extern void up_read(struct rw_semaphore *sem);
/*
* release a write lock
*/
-static inline void up_write(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering up_write");
- __up_write(sem);
- rwsemtrace(sem,"Leaving up_write");
-}
+extern void up_write(struct rw_semaphore *sem);
/*
* downgrade write lock to read lock
*/
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
- rwsemtrace(sem,"Entering downgrade_write");
- __downgrade_write(sem);
- rwsemtrace(sem,"Leaving downgrade_write");
-}
+extern void downgrade_write(struct rw_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * nested locking. NOTE: rwsems are not allowed to recurse
+ * (which occurs if the same task tries to acquire the same
+ * lock instance multiple times), but multiple locks of the
+ * same lock class might be taken, if the order of the locks
+ * is always the same. This ordering rule can be expressed
+ * to lockdep via the _nested() APIs, by enumerating the
+ * subclasses that are used. (If the nesting relationship is
+ * static then another method for expressing nested locking is
+ * the explicit definition of lock class keys and the use of
+ * lockdep_set_class() at lock initialization time.
+ * See Documentation/lockdep-design.txt for more details.)
+ */
+extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+/*
+ * Take/release a lock when the task that acquired it is not the one that will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ * proper abstraction for this case is completions. ]
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+#else
+# define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nested(sem, subclass) down_write(sem)
+# define down_read_non_owner(sem) down_read(sem)
+# define up_read_non_owner(sem) up_read(sem)
+#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_H */
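
A hedged sketch of the _nested() annotation described in the comment above: two rwsems of the same class taken in a fixed parent-before-child order, with the inner acquisition tagged by an explicit subclass so lockdep does not report it as recursion. The enum and struct example_dir are invented.

enum { EXAMPLE_SUBCLASS_PARENT, EXAMPLE_SUBCLASS_CHILD };

struct example_dir {
	struct rw_semaphore sem;
};

static void example_lock_pair(struct example_dir *parent, struct example_dir *child)
{
	down_write(&parent->sem);				/* subclass 0 */
	down_write_nested(&child->sem, EXAMPLE_SUBCLASS_CHILD);	/* subclass 1 */
	/* ... operate on both directories ... */
	up_write(&child->sem);
	up_write(&parent->sem);
}
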
diff --git a/include/linux/sched.h b/include/linux/sched.h
index aaf723308ed..34ed0d99b1b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
-typedef struct task_struct task_t;
+struct task_struct;
extern void sched_init(void);
extern void sched_init_smp(void);
-extern void init_idle(task_t *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
@@ -383,7 +383,7 @@ struct signal_struct {
wait_queue_head_t wait_chldexit; /* for wait4() */
/* current thread group signal load-balancing target: */
- task_t *curr_target;
+ struct task_struct *curr_target;
/* shared signal handling: */
struct sigpending shared_pending;
@@ -463,6 +463,10 @@ struct signal_struct {
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
+#ifdef CONFIG_TASKSTATS
+ spinlock_t stats_lock;
+ struct taskstats *stats;
+#endif
};
/* Context switch must be unlocked if interrupts are to be enabled */
@@ -534,11 +538,10 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
-typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
/* cumulative counters */
unsigned long cpu_time, /* time spent on the cpu */
@@ -549,9 +552,53 @@ struct sched_info {
unsigned long last_arrival, /* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
};
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+#ifdef CONFIG_SCHEDSTATS
extern struct file_operations proc_schedstat_operations;
+#endif /* CONFIG_SCHEDSTATS */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+struct task_delay_info {
+ spinlock_t lock;
+ unsigned int flags; /* Private per-task flags */
+
+ /* For each stat XXX, add following, aligned appropriately
+ *
+ * struct timespec XXX_start, XXX_end;
+ * u64 XXX_delay;
+ * u32 XXX_count;
+ *
+ * Atomicity of updates to XXX_delay, XXX_count protected by
+ * single lock above (split into XXX_lock if contention is an issue).
+ */
+
+ /*
+ * XXX_count is incremented on every XXX operation, the delay
+ * associated with the operation is added to XXX_delay.
+ * XXX_delay contains the accumulated delay time in nanoseconds.
+ */
+ struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
+ u64 blkio_delay; /* wait for sync block io completion */
+ u64 swapin_delay; /* wait for swapin block io completion */
+ u32 blkio_count; /* total count of the number of sync block */
+ /* io operations performed */
+ u32 swapin_count; /* total count of the number of swapin block */
+ /* io operations performed */
+};
+#endif /* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+ return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+ extern int delayacct_on;
+ return delayacct_on;
+#else
+ return 0;
#endif
+}
enum idle_type
{
@@ -699,7 +746,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct*);
+extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
@@ -715,6 +762,8 @@ enum sleep_type {
SLEEP_INTERRUPTED,
};
+struct prio_array;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
@@ -732,7 +781,7 @@ struct task_struct {
int load_weight; /* for niceness load balancing purposes */
int prio, static_prio, normal_prio;
struct list_head run_list;
- prio_array_t *array;
+ struct prio_array *array;
unsigned short ioprio;
unsigned int btrace_seq;
@@ -746,7 +795,7 @@ struct task_struct {
cpumask_t cpus_allowed;
unsigned int time_slice, first_time_slice;
-#ifdef CONFIG_SCHEDSTATS
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
#endif
@@ -865,16 +914,34 @@ struct task_struct {
struct plist_head pi_waiters;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
- spinlock_t held_list_lock;
- struct list_head held_list_head;
-# endif
#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ int hardirqs_enabled;
+ unsigned long hardirq_enable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_disable_event;
+ int softirqs_enabled;
+ unsigned long softirq_disable_ip;
+ unsigned int softirq_disable_event;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_enable_event;
+ int hardirq_context;
+ int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 30UL
+ u64 curr_chain_key;
+ int lockdep_depth;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+ unsigned int lockdep_recursion;
+#endif
/* journalling filesystem info */
void *journal_info;
@@ -926,6 +993,9 @@ struct task_struct {
* cache last used pipe for splice
*/
struct pipe_inode_info *splice_pipe;
+#ifdef CONFIG_TASK_DELAY_ACCT
+ struct task_delay_info *delays;
+#endif
};
static inline pid_t process_group(struct task_struct *tsk)
@@ -1013,9 +1083,9 @@ static inline void put_task_struct(struct task_struct *t)
#define used_math() tsk_used_math(current)
#ifdef CONFIG_SMP
-extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
#else
-static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
if (!cpu_isset(0, new_mask))
return -EINVAL;
@@ -1024,7 +1094,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
#endif
extern unsigned long long sched_clock(void);
-extern unsigned long long current_sched_time(const task_t *current_task);
+extern unsigned long long
+current_sched_time(const struct task_struct *current_task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
@@ -1042,27 +1113,27 @@ static inline void idle_task_exit(void) {}
extern void sched_idle_next(void);
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(task_t *p);
-extern void rt_mutex_setprio(task_t *p, int prio);
-extern void rt_mutex_adjust_pi(task_t *p);
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
#else
-static inline int rt_mutex_getprio(task_t *p)
+static inline int rt_mutex_getprio(struct task_struct *p)
{
return p->normal_prio;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
#endif
-extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(const task_t *p);
-extern int task_nice(const task_t *p);
-extern int can_nice(const task_t *p, const int nice);
-extern int task_curr(const task_t *p);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern task_t *idle_task(int cpu);
-extern task_t *curr_task(int cpu);
-extern void set_curr_task(int cpu, task_t *p);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
@@ -1119,8 +1190,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
-extern void FASTCALL(sched_exit(task_t * p));
+extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
+extern void FASTCALL(sched_exit(struct task_struct * p));
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
@@ -1225,17 +1296,17 @@ extern NORET_TYPE void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern task_t *child_reaper;
+extern struct task_struct *child_reaper;
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-task_t *fork_idle(int);
+struct task_struct *fork_idle(int);
extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
-extern void wait_task_inactive(task_t * p);
+extern void wait_task_inactive(struct task_struct * p);
#else
#define wait_task_inactive(p) do { } while (0)
#endif
@@ -1261,13 +1332,13 @@ extern void wait_task_inactive(task_t * p);
/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p) (p == p->group_leader)
-static inline task_t *next_thread(const task_t *p)
+static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry(rcu_dereference(p->thread_group.next),
- task_t, thread_group);
+ struct task_struct, thread_group);
}
-static inline int thread_group_empty(task_t *p)
+static inline int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
@@ -1486,6 +1557,14 @@ static inline void freeze(struct task_struct *p)
}
/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+ p->flags &= ~PF_FREEZE;
+}
+
+/*
* Wake up a frozen process
*/
static inline int thaw_process(struct task_struct *p)
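
A hedged sketch of the per-stat update pattern documented in task_delay_info above (start/end timespec, accumulated u64 delay and u32 count protected by the single lock); this is an illustration, not the delayacct implementation.

static void example_delay_end(struct task_delay_info *d,
			      struct timespec *start, struct timespec *end,
			      u64 *total, u32 *count)
{
	s64 ns = (s64)(end->tv_sec - start->tv_sec) * NSEC_PER_SEC +
		 (end->tv_nsec - start->tv_nsec);
	unsigned long flags;

	if (ns > 0) {
		spin_lock_irqsave(&d->lock, flags);
		*total += ns;
		(*count)++;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
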
diff --git a/include/linux/security.h b/include/linux/security.h
index f75303831d0..6bc2aad494f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1109,6 +1109,16 @@ struct swap_info_struct;
* @name contains the name of the security module being unstacked.
* @ops contains a pointer to the struct security_operations of the module to unstack.
*
+ * @secid_to_secctx:
+ * Convert secid to security context.
+ * @secid contains the security ID.
+ * @secdata contains the pointer that stores the converted security context.
+ *
+ * @release_secctx:
+ * Release the security context.
+ * @secdata contains the security context.
+ * @seclen contains the length of the security context.
+ *
* This is the main security structure.
*/
struct security_operations {
@@ -1289,6 +1299,8 @@ struct security_operations {
int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size);
int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size);
+ int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
+ void (*release_secctx)(char *secdata, u32 seclen);
#ifdef CONFIG_SECURITY_NETWORK
int (*unix_stream_connect) (struct socket * sock,
@@ -1317,7 +1329,7 @@ struct security_operations {
int (*socket_shutdown) (struct socket * sock, int how);
int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
- int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen);
+ int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
void (*sk_free_security) (struct sock *sk);
unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir);
@@ -2059,6 +2071,16 @@ static inline int security_netlink_recv(struct sk_buff * skb, int cap)
return security_ops->netlink_recv(skb, cap);
}
+static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ return security_ops->secid_to_secctx(secid, secdata, seclen);
+}
+
+static inline void security_release_secctx(char *secdata, u32 seclen)
+{
+ return security_ops->release_secctx(secdata, seclen);
+}
+
/* prototypes */
extern int security_init (void);
extern int register_security (struct security_operations *ops);
@@ -2725,6 +2747,14 @@ static inline void securityfs_remove(struct dentry *dentry)
{
}
+static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void security_release_secctx(char *secdata, u32 seclen)
+{
+}
#endif /* CONFIG_SECURITY */
#ifdef CONFIG_SECURITY_NETWORK
@@ -2840,10 +2870,9 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __
return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
}
-static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
- u32 *seclen)
+static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
- return security_ops->socket_getpeersec_dgram(skb, secdata, seclen);
+ return security_ops->socket_getpeersec_dgram(sock, skb, secid);
}
static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
@@ -2968,8 +2997,7 @@ static inline int security_socket_getpeersec_stream(struct socket *sock, char __
return -ENOPROTOOPT;
}
-static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
- u32 *seclen)
+static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
return -ENOPROTOOPT;
}
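
A hedged sketch of how the new hook pair is intended to be used together: convert a secid to a printable context, use it, then hand it back through release_secctx. example_print_secctx() is hypothetical; without an LSM the conversion simply returns -EOPNOTSUPP.

static void example_print_secctx(u32 secid)
{
	char *secdata;
	u32 seclen;

	if (security_secid_to_secctx(secid, &secdata, &seclen))
		return;
	printk(KERN_INFO "secctx=%.*s\n", (int)seclen, secdata);
	security_release_secctx(secdata, seclen);
}
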
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 7bc5c7c12b5..46000936f8f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -38,9 +38,17 @@ typedef struct {
* These macros triggered gcc-3.x compile-time problems. We think these are
* OK now. Be cautious.
*/
-#define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
-#define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0)
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+#define SEQLOCK_UNLOCKED \
+ __SEQLOCK_UNLOCKED(old_style_seqlock_init)
+
+#define seqlock_init(x) \
+ do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
/* Lock out other writers and update the count.
* Acts like a normal spin_lock/unlock.
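
A hedged usage sketch for the new DEFINE_SEQLOCK(): a lockless reader retries whenever the sequence count shows a writer ran in between. The names are invented; a writer would wrap its update in write_seqlock()/write_sequnlock().

static DEFINE_SEQLOCK(example_lock);
static u64 example_value;

static u64 example_read(void)
{
	unsigned seq;
	u64 v;

	do {
		seq = read_seqbegin(&example_lock);
		v = example_value;
	} while (read_seqretry(&example_lock, seq));

	return v;
}
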
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index fc1104a2cfa..86501a3de2a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -216,16 +216,18 @@ struct uart_port {
unsigned char __iomem *membase; /* read/write[bwl] */
unsigned int irq; /* irq number */
unsigned int uartclk; /* base uart clock */
- unsigned char fifosize; /* tx fifo size */
+ unsigned int fifosize; /* tx fifo size */
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
unsigned char iotype; /* io access style */
+ unsigned char unused1;
#define UPIO_PORT (0)
#define UPIO_HUB6 (1)
#define UPIO_MEM (2)
#define UPIO_MEM32 (3)
#define UPIO_AU (4) /* Au1x00 type IO */
+#define UPIO_TSI (5) /* Tsi108/109 type IO */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 57d7d4965f9..755e9cddac4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -604,6 +604,14 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
return list_->qlen;
}
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers will need annotation to consolidate the
+ * main types of usage into 3 classes.
+ */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
spin_lock_init(&list->lock);
@@ -1032,6 +1040,21 @@ static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
}
/**
+ * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * This is identical to pskb_trim except that the caller knows that
+ * the skb is not cloned so we should never get an error due to out-
+ * of-memory.
+ */
+static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
+{
+ int err = pskb_trim(skb, len);
+ BUG_ON(err);
+}
+
+/**
* skb_orphan - orphan a buffer
* @skb: buffer to orphan
*
@@ -1063,9 +1086,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
/**
- * __dev_alloc_skb - allocate an skbuff for sending
+ * __dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
@@ -1074,7 +1096,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
- * %NULL is returned in there is no free memory.
+ * %NULL is returned if there is no free memory.
*/
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
@@ -1084,12 +1106,9 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
skb_reserve(skb, NET_SKB_PAD);
return skb;
}
-#else
-extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
-#endif
/**
- * dev_alloc_skb - allocate an skbuff for sending
+ * dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
*
* Allocate a new &sk_buff and assign it a usage count of one. The
@@ -1097,7 +1116,7 @@ extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
- * %NULL is returned in there is no free memory. Although this function
+ * %NULL is returned if there is no free memory. Although this function
* allocates memory it can be called from an interrupt.
*/
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
@@ -1105,6 +1124,28 @@ static inline struct sk_buff *dev_alloc_skb(unsigned int length)
return __dev_alloc_skb(length, GFP_ATOMIC);
}
+extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
/**
* skb_cow - copy header of skb when it is required
* @skb: buffer to cow
@@ -1452,5 +1493,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+static inline int skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
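
A hedged sketch of a driver receive path using the new netdev_alloc_skb(), which ties the GFP_ATOMIC allocation to the receiving device. EXAMPLE_RX_BUF_LEN and example_rx_alloc() are invented; the extra skb_reserve() is the usual trick to align the eventual IP header.

#define EXAMPLE_RX_BUF_LEN 1536

static struct sk_buff *example_rx_alloc(struct net_device *dev)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, EXAMPLE_RX_BUF_LEN + NET_IP_ALIGN);
	if (!skb)
		return NULL;			/* atomic allocation may fail */
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	return skb;
}
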
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc..31473db92d3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
* Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
*/
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
-#define spin_lock_init(lock) do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#define rwlock_init(lock) do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __spin_lock_init((lock), #lock, &__key); \
+} while (0)
+
+#else
+# define spin_lock_init(lock) \
+ do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
-
extern void _raw_read_lock(rwlock_t *lock);
extern int _raw_read_trylock(rwlock_t *lock);
extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
#define write_trylock(lock) __cond_lock(_write_trylock(lock))
#define spin_lock(lock) _spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+#endif
+
#define write_lock(lock) _write_lock(lock)
#define read_lock(lock) _read_lock(lock)
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
/*
* We inline the unlock functions in the nondebug case:
*/
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+ !defined(CONFIG_SMP)
# define spin_unlock(lock) _spin_unlock(lock)
# define read_unlock(lock) _read_unlock(lock)
# define write_unlock(lock) _write_unlock(lock)
-#else
-# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
-# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
# define read_unlock_irq(lock) _read_unlock_irq(lock)
# define write_unlock_irq(lock) _write_unlock_irq(lock)
#else
+# define spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock) __raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock) __raw_write_unlock(&(lock)->raw_lock)
# define spin_unlock_irq(lock) \
do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
# define read_unlock_irq(lock) \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb5..b2c4f829946 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
void __lockfunc _spin_lock(spinlock_t *lock) __acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+ __acquires(spinlock_t);
void __lockfunc _read_lock(rwlock_t *lock) __acquires(rwlock_t);
void __lockfunc _write_lock(rwlock_t *lock) __acquires(rwlock_t);
void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f..67faa044c5f 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
#define _spin_lock(lock) __LOCK(lock)
+#define _spin_lock_nested(lock, subclass) __LOCK(lock)
#define _read_lock(lock) __LOCK(lock)
#define _write_lock(lock) __LOCK(lock)
#define _spin_lock_bh(lock) __LOCK_BH(lock)
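
A hedged sketch of the new spin_lock_nested() (declared for SMP above and stubbed for UP here): when two locks of the same class must be held at once, order them consistently and tag the inner one with a subclass so lockdep does not treat it as self-recursion. struct example_node is invented; SINGLE_DEPTH_NESTING is assumed to come from <linux/lockdep.h>.

struct example_node {
	spinlock_t lock;
};

static void example_lock_both(struct example_node *a, struct example_node *b)
{
	if (a > b) {				/* fixed address order avoids ABBA */
		struct example_node *tmp = a;
		a = b;
		b = tmp;
	}
	spin_lock(&a->lock);
	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	/* ... */
	spin_unlock(&b->lock);
	spin_unlock(&a->lock);
}
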
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 9cb51e07039..dc5fb69e4de 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
* Released under the General Public License (GPL).
*/
+#include <linux/lockdep.h>
+
#if defined(CONFIG_SMP)
# include <asm/spinlock_types.h>
#else
@@ -24,6 +26,9 @@ typedef struct {
unsigned int magic, owner_cpu;
void *owner;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
@@ -37,31 +42,53 @@ typedef struct {
unsigned int magic, owner_cpu;
void *owner;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} rwlock_t;
#define RWLOCK_MAGIC 0xdeaf1eed
#define SPINLOCK_OWNER_INIT ((void *)-1L)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_LOCK_UNLOCKED \
+# define __SPIN_LOCK_UNLOCKED(lockname) \
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
.magic = SPINLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1 }
-#define RW_LOCK_UNLOCKED \
+ .owner_cpu = -1, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
.magic = RWLOCK_MAGIC, \
.owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1 }
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
#else
-# define SPIN_LOCK_UNLOCKED \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
-#define RW_LOCK_UNLOCKED \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
#endif
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..27644af20b7 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
* Released under the General Public License (GPL).
*/
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+ defined(CONFIG_DEBUG_LOCK_ALLOC)
typedef struct {
volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
typedef struct {
/* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b1..ea54c4c9a4e 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-
#define __raw_spin_is_locked(x) ((x)->slock == 0)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
new file mode 100644
index 00000000000..9cc81e57222
--- /dev/null
+++ b/include/linux/stacktrace.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_STACKTRACE_H
+#define __LINUX_STACKTRACE_H
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace {
+ unsigned int nr_entries, max_entries;
+ unsigned long *entries;
+};
+
+extern void save_stack_trace(struct stack_trace *trace,
+ struct task_struct *task, int all_contexts,
+ unsigned int skip);
+
+extern void print_stack_trace(struct stack_trace *trace, int spaces);
+#else
+# define save_stack_trace(trace, task, all, skip) do { } while (0)
+# define print_stack_trace(trace, spaces) do { } while (0)
+#endif
+
+#endif
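
A hedged usage sketch matching the interface declared above: capture the current task's backtrace into a small fixed buffer and print it. The buffer size and the wrapper are illustrative only.

static void example_dump_trace(void)
{
#ifdef CONFIG_STACKTRACE
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= 16,
		.entries	= entries,
	};

	save_stack_trace(&trace, current, 0 /* current context only */, 1 /* skip this wrapper */);
	print_stack_trace(&trace, 0);
#endif
}
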
diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild
new file mode 100644
index 00000000000..0d1d768a27b
--- /dev/null
+++ b/include/linux/sunrpc/Kbuild
@@ -0,0 +1 @@
+unifdef-y := debug.h
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 2c2189cb30a..a481472c948 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -42,9 +42,9 @@ RPC_I(struct inode *inode)
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
-extern int rpc_rmdir(char *);
+extern int rpc_rmdir(struct dentry *);
extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags);
-extern int rpc_unlink(char *);
+extern int rpc_unlink(struct dentry *);
extern struct vfsmount *rpc_get_mount(void);
extern void rpc_put_mount(void);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e8bbe8118de..3a0cca255b7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport;
#define RPC_MIN_RESVPORT (1U)
#define RPC_MAX_RESVPORT (65535U)
-#define RPC_DEF_MIN_RESVPORT (650U)
+#define RPC_DEF_MIN_RESVPORT (665U)
#define RPC_DEF_MAX_RESVPORT (1023U)
/*
@@ -229,7 +229,7 @@ int xprt_reserve_xprt(struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_task *task);
int xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
-void xprt_abort_transmit(struct rpc_task *task);
+void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index cf6ca6e377b..5e59184c909 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,6 +189,7 @@ extern long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
+extern int sysctl_min_unmapped_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 46e4d8f2771..e4b1a4d4dcf 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -188,7 +188,7 @@ enum
VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
- VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+ VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
};
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
new file mode 100644
index 00000000000..f1cb6cddd19
--- /dev/null
+++ b/include/linux/taskstats.h
@@ -0,0 +1,137 @@
+/* taskstats.h - exporting per-task statistics
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2006
+ * (C) Balbir Singh, IBM Corp. 2006
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _LINUX_TASKSTATS_H
+#define _LINUX_TASKSTATS_H
+
+/* Format for per-task data returned to userland when
+ * - a task exits
+ * - listener requests stats for a task
+ *
+ * The struct is versioned. Newer versions should only add fields to
+ * the bottom of the struct to maintain backward compatibility.
+ *
+ *
+ * To add new fields
+ * a) bump up TASKSTATS_VERSION
+ * b) add comment indicating new version number at end of struct
+ * c) add new fields after version comment; maintain 64-bit alignment
+ */
+
+#define TASKSTATS_VERSION 1
+
+struct taskstats {
+
+ /* Version 1 */
+ __u16 version;
+ __u16 padding[3]; /* Userspace should not interpret the padding
+ * field which can be replaced by useful
+ * fields if struct taskstats is extended.
+ */
+
+ /* Delay accounting fields start
+ *
+ * All values, until comment "Delay accounting fields end" are
+ * available only if delay accounting is enabled, even though the last
+ * few fields are not delays
+ *
+ * xxx_count is the number of delay values recorded
+ * xxx_delay_total is the corresponding cumulative delay in nanoseconds
+ *
+ * xxx_delay_total wraps around to zero on overflow
+ * xxx_count incremented regardless of overflow
+ */
+
+ /* Delay waiting for cpu, while runnable
+ * count, delay_total NOT updated atomically
+ */
+ __u64 cpu_count;
+ __u64 cpu_delay_total;
+
+ /* Following four fields atomically updated using task->delays->lock */
+
+ /* Delay waiting for synchronous block I/O to complete
+ * does not account for delays in I/O submission
+ */
+ __u64 blkio_count;
+ __u64 blkio_delay_total;
+
+ /* Delay waiting for page fault I/O (swap in only) */
+ __u64 swapin_count;
+ __u64 swapin_delay_total;
+
+ /* cpu "wall-clock" running time
+ * On some architectures, value will adjust for cpu time stolen
+ * from the kernel in involuntary waits due to virtualization.
+ * Value is cumulative, in nanoseconds, without a corresponding count
+ * and wraps around to zero silently on overflow
+ */
+ __u64 cpu_run_real_total;
+
+ /* cpu "virtual" running time
+ * Uses time intervals seen by the kernel i.e. no adjustment
+ * for kernel's involuntary waits due to virtualization.
+ * Value is cumulative, in nanoseconds, without a corresponding count
+ * and wraps around to zero silently on overflow
+ */
+ __u64 cpu_run_virtual_total;
+ /* Delay accounting fields end */
+ /* version 1 ends here */
+};
+
+
+/*
+ * Commands sent from userspace
+ * Not versioned. New commands should only be inserted at the enum's end
+ * prior to __TASKSTATS_CMD_MAX
+ */
+
+enum {
+ TASKSTATS_CMD_UNSPEC = 0, /* Reserved */
+ TASKSTATS_CMD_GET, /* user->kernel request/get-response */
+ TASKSTATS_CMD_NEW, /* kernel->user event */
+ __TASKSTATS_CMD_MAX,
+};
+
+#define TASKSTATS_CMD_MAX (__TASKSTATS_CMD_MAX - 1)
+
+enum {
+ TASKSTATS_TYPE_UNSPEC = 0, /* Reserved */
+ TASKSTATS_TYPE_PID, /* Process id */
+ TASKSTATS_TYPE_TGID, /* Thread group id */
+ TASKSTATS_TYPE_STATS, /* taskstats structure */
+ TASKSTATS_TYPE_AGGR_PID, /* contains pid + stats */
+ TASKSTATS_TYPE_AGGR_TGID, /* contains tgid + stats */
+ __TASKSTATS_TYPE_MAX,
+};
+
+#define TASKSTATS_TYPE_MAX (__TASKSTATS_TYPE_MAX - 1)
+
+enum {
+ TASKSTATS_CMD_ATTR_UNSPEC = 0,
+ TASKSTATS_CMD_ATTR_PID,
+ TASKSTATS_CMD_ATTR_TGID,
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
+ __TASKSTATS_CMD_ATTR_MAX,
+};
+
+#define TASKSTATS_CMD_ATTR_MAX (__TASKSTATS_CMD_ATTR_MAX - 1)
+
+/* NETLINK_GENERIC related info */
+
+#define TASKSTATS_GENL_NAME "TASKSTATS"
+#define TASKSTATS_GENL_VERSION 0x1
+
+#endif /* _LINUX_TASKSTATS_H */
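
The enums above are consumed over NETLINK_GENERIC. A hedged sketch of building a TASKSTATS_CMD_GET request for one pid follows; build_taskstats_get() is a hypothetical helper, the genetlink family id is assumed to have been resolved beforehand (CTRL_CMD_GETFAMILY for TASKSTATS_GENL_NAME), and buf is assumed to be zeroed and large enough:

	#include <string.h>
	#include <unistd.h>
	#include <linux/types.h>
	#include <linux/netlink.h>
	#include <linux/genetlink.h>
	#include <linux/taskstats.h>

	/* Hypothetical helper: fill buf with a TASKSTATS_CMD_GET request
	 * carrying a single TASKSTATS_CMD_ATTR_PID attribute.  Returns the
	 * total length to pass to sendto() on a NETLINK_GENERIC socket. */
	static int build_taskstats_get(char *buf, __u16 family_id, __u32 pid)
	{
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
		struct genlmsghdr *genl = (struct genlmsghdr *)NLMSG_DATA(nlh);
		struct nlattr *attr;

		nlh->nlmsg_type = family_id;	/* resolved TASKSTATS family id */
		nlh->nlmsg_flags = NLM_F_REQUEST;
		nlh->nlmsg_seq = 0;
		nlh->nlmsg_pid = getpid();

		genl->cmd = TASKSTATS_CMD_GET;
		genl->version = TASKSTATS_GENL_VERSION;

		attr = (struct nlattr *)((char *)genl + GENL_HDRLEN);
		attr->nla_type = TASKSTATS_CMD_ATTR_PID;
		attr->nla_len = NLA_HDRLEN + sizeof(pid);
		memcpy((char *)attr + NLA_HDRLEN, &pid, sizeof(pid));

		nlh->nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN) + NLA_ALIGN(attr->nla_len);
		return nlh->nlmsg_len;
	}

The reply arrives as a TASKSTATS_CMD_NEW message whose TASKSTATS_TYPE_AGGR_PID attribute nests a TASKSTATS_TYPE_PID and a TASKSTATS_TYPE_STATS payload holding the struct taskstats shown above.
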
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
new file mode 100644
index 00000000000..16894b7edcc
--- /dev/null
+++ b/include/linux/taskstats_kern.h
@@ -0,0 +1,89 @@
+/* taskstats_kern.h - kernel header for per-task statistics interface
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2006
+ * (C) Balbir Singh, IBM Corp. 2006
+ */
+
+#ifndef _LINUX_TASKSTATS_KERN_H
+#define _LINUX_TASKSTATS_KERN_H
+
+#include <linux/taskstats.h>
+#include <linux/sched.h>
+#include <net/genetlink.h>
+
+#ifdef CONFIG_TASKSTATS
+extern kmem_cache_t *taskstats_cache;
+extern struct mutex taskstats_exit_mutex;
+
+static inline void taskstats_exit_free(struct taskstats *tidstats)
+{
+ if (tidstats)
+ kmem_cache_free(taskstats_cache, tidstats);
+}
+
+static inline void taskstats_tgid_init(struct signal_struct *sig)
+{
+ spin_lock_init(&sig->stats_lock);
+ sig->stats = NULL;
+}
+
+static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+{
+ struct taskstats *stats;
+ unsigned long flags;
+
+ stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
+ if (!stats)
+ return;
+
+ spin_lock_irqsave(&sig->stats_lock, flags);
+ if (!sig->stats) {
+ sig->stats = stats;
+ stats = NULL;
+ }
+ spin_unlock_irqrestore(&sig->stats_lock, flags);
+
+ if (stats)
+ kmem_cache_free(taskstats_cache, stats);
+}
+
+static inline void taskstats_tgid_free(struct signal_struct *sig)
+{
+ struct taskstats *stats = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sig->stats_lock, flags);
+ if (sig->stats) {
+ stats = sig->stats;
+ sig->stats = NULL;
+ }
+ spin_unlock_irqrestore(&sig->stats_lock, flags);
+ if (stats)
+ kmem_cache_free(taskstats_cache, stats);
+}
+
+extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
+extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_init_early(void);
+extern void taskstats_tgid_alloc(struct signal_struct *);
+#else
+static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
+{}
+static inline void taskstats_exit_free(struct taskstats *ptidstats)
+{}
+static inline void taskstats_exit_send(struct task_struct *tsk,
+ struct taskstats *tidstats,
+ int group_dead, unsigned int cpu)
+{}
+static inline void taskstats_tgid_init(struct signal_struct *sig)
+{}
+static inline void taskstats_tgid_alloc(struct signal_struct *sig)
+{}
+static inline void taskstats_tgid_free(struct signal_struct *sig)
+{}
+static inline void taskstats_init_early(void)
+{}
+#endif /* CONFIG_TASKSTATS */
+
+#endif
+
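
The exit-path helpers declared above are meant to bracket the rest of the exit work. A simplified sketch of the expected calling sequence (illustrative only, not the actual do_exit() code; locking and the real group_dead bookkeeping are omitted):

	#include <linux/sched.h>
	#include <linux/taskstats_kern.h>

	/* Allocate the per-task buffer early, fill and send it once the
	 * accounting numbers are final, then free it.  All of these helpers
	 * degrade to no-ops when CONFIG_TASKSTATS is off, and
	 * taskstats_exit_free() accepts a NULL pointer. */
	static void example_exit_accounting(struct task_struct *tsk, int group_dead)
	{
		struct taskstats *tidstats;
		unsigned int mycpu;

		taskstats_exit_alloc(&tidstats, &mycpu);
		/* ... remaining exit work runs here ... */
		taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
		taskstats_exit_free(tidstats);
	}
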
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
new file mode 100644
index 00000000000..5251a505b2f
--- /dev/null
+++ b/include/linux/tc_act/Kbuild
@@ -0,0 +1 @@
+header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h
diff --git a/include/linux/tc_ematch/Kbuild b/include/linux/tc_ematch/Kbuild
new file mode 100644
index 00000000000..381e93018df
--- /dev/null
+++ b/include/linux/tc_ematch/Kbuild
@@ -0,0 +1 @@
+header-y += tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h
diff --git a/include/linux/time.h b/include/linux/time.h
index c05f8bb9a32..a5b739967b7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -71,6 +71,18 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
/*
+ * sub = lhs - rhs, in normalized form
+ */
+static inline struct timespec timespec_sub(struct timespec lhs,
+ struct timespec rhs)
+{
+ struct timespec ts_delta;
+ set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+ lhs.tv_nsec - rhs.tv_nsec);
+ return ts_delta;
+}
+
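
As a quick illustration of the helper just added (values chosen by hand; the point is that the result comes back normalized even when the nanosecond part underflows):

	/* Illustrative fragment only */
	struct timespec start = { .tv_sec = 5, .tv_nsec = 100 };
	struct timespec end   = { .tv_sec = 7, .tv_nsec = 50 };
	struct timespec delta = timespec_sub(end, start);
	/* delta.tv_sec == 1, delta.tv_nsec == 999999950 */
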
+/*
* Returns true if the timespec is norm, false if denorm:
*/
#define timespec_valid(ts) \
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 19bb6538b49..d543d3871e3 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -57,7 +57,6 @@
#include <linux/time.h>
#include <asm/param.h>
-#include <asm/timex.h>
/*
* SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
@@ -191,6 +190,8 @@ struct timex {
#define TIME_BAD TIME_ERROR /* bw compat */
#ifdef __KERNEL__
+#include <asm/timex.h>
+
/*
* kernel variables
* Note: maximum error = NTP synch distance = dispersion + delay / 2;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index b3b807e4b05..04827ca6578 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -5,16 +5,6 @@
* 'tty.h' defines some structures used by tty_io.c and some defines.
*/
-/*
- * These constants are also useful for user-level apps (e.g., VC
- * resizing).
- */
-#define MIN_NR_CONSOLES 1 /* must be at least 1 */
-#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
-#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
- /* Note: the ioctl VT_GETSTATE does not work for
- consoles 16 and higher (since it returns a short) */
-
#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/major.h>
@@ -22,7 +12,6 @@
#include <linux/workqueue.h>
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
-#include <linux/screen_info.h>
#include <linux/mutex.h>
#include <asm/system.h>
@@ -70,6 +59,7 @@ struct tty_bufhead {
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */
struct tty_buffer *free; /* Free queue head */
+ int memory_used; /* Buffer space used excluding free queue */
};
/*
* The pty uses char_buf and flag_buf as a contiguous buffer
@@ -270,7 +260,6 @@ struct tty_struct {
extern void tty_write_flush(struct tty_struct *);
extern struct termios tty_std_termios;
-extern int fg_console, last_console, want_console;
extern int kmsg_redirect;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 8dead32e7eb..d2bd0c8e015 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -48,7 +48,7 @@ struct ep_device;
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
- * @kobj: kobject for sysfs info
+ * @ep_dev: ep_device for sysfs info
* @extra: descriptors following this endpoint in the configuration
* @extralen: how many bytes of "extra" are valid
*
@@ -103,8 +103,7 @@ enum usb_interface_condition {
* @condition: binding state of the interface: not bound, binding
* (in probe()), bound to a driver, or unbinding (in disconnect())
* @dev: driver model's view of this device
- * @usb_dev: if an interface is bound to the USB major, this will point
- * to the sysfs representation for that device.
+ * @class_dev: driver model's class view of this device.
*
* USB device drivers attach to interfaces on a physical device. Each
* interface encapsulates a single high level function, such as feeding
@@ -144,7 +143,7 @@ struct usb_interface {
* bound to */
enum usb_interface_condition condition; /* state of binding */
struct device dev; /* interface specific device info */
- struct device *usb_dev; /* pointer to the usb class's device, if any */
+ struct class_device *class_dev;
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
#define interface_to_usbdev(intf) \
@@ -361,7 +360,7 @@ struct usb_device {
char *serial; /* iSerialNumber string, if present */
struct list_head filelist;
- struct device *usbfs_dev;
+ struct class_device *class_dev;
struct dentry *usbfs_dentry; /* usbfs dentry entry for the device */
/*
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
new file mode 100644
index 00000000000..91c983eef89
--- /dev/null
+++ b/include/linux/usb/serial.h
@@ -0,0 +1,300 @@
+/*
+ * USB Serial Converter stuff
+ *
+ * Copyright (C) 1999 - 2005
+ * Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+
+#ifndef __LINUX_USB_SERIAL_H
+#define __LINUX_USB_SERIAL_H
+
+#include <linux/kref.h>
+#include <linux/mutex.h>
+
+#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
+#define SERIAL_TTY_MINORS 255 /* loads of devices :) */
+
+#define MAX_NUM_PORTS 8 /* The maximum number of ports one device can grab at once */
+
+/* parity check flag */
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+/**
+ * struct usb_serial_port - structure for the specific ports of a device.
+ * @serial: pointer back to the struct usb_serial owner of this port.
+ * @tty: pointer to the corresponding tty for this port.
+ * @lock: spinlock to grab when updating portions of this structure.
+ * @mutex: mutex used to synchronize serial_open() and serial_close()
+ * access for this port.
+ * @number: the number of the port (the minor number).
+ * @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
+ * @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
+ * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe
+ * for this port.
+ * @interrupt_out_buffer: pointer to the interrupt out buffer for this port.
+ * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes.
+ * @interrupt_out_urb: pointer to the interrupt out struct urb for this port.
+ * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
+ * for this port.
+ * @bulk_in_buffer: pointer to the bulk in buffer for this port.
+ * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
+ * @read_urb: pointer to the bulk in struct urb for this port.
+ * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
+ * port.
+ * @bulk_out_buffer: pointer to the bulk out buffer for this port.
+ * @bulk_out_size: the size of the bulk_out_buffer, in bytes.
+ * @write_urb: pointer to the bulk out struct urb for this port.
+ * @write_urb_busy: nonzero while @write_urb is in use.
+ * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
+ * port.
+ * @write_wait: a wait_queue_head_t used by the port.
+ * @work: work queue entry for the line discipline waking up.
+ * @open_count: number of times this port has been opened.
+ *
+ * This structure is used by the usb-serial core and drivers for the specific
+ * ports of a device.
+ */
+struct usb_serial_port {
+ struct usb_serial * serial;
+ struct tty_struct * tty;
+ spinlock_t lock;
+ struct mutex mutex;
+ unsigned char number;
+
+ unsigned char * interrupt_in_buffer;
+ struct urb * interrupt_in_urb;
+ __u8 interrupt_in_endpointAddress;
+
+ unsigned char * interrupt_out_buffer;
+ int interrupt_out_size;
+ struct urb * interrupt_out_urb;
+ __u8 interrupt_out_endpointAddress;
+
+ unsigned char * bulk_in_buffer;
+ int bulk_in_size;
+ struct urb * read_urb;
+ __u8 bulk_in_endpointAddress;
+
+ unsigned char * bulk_out_buffer;
+ int bulk_out_size;
+ struct urb * write_urb;
+ int write_urb_busy;
+ __u8 bulk_out_endpointAddress;
+
+ wait_queue_head_t write_wait;
+ struct work_struct work;
+ int open_count;
+ struct device dev;
+};
+#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
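
The port fields documented above are typically consumed in a driver's open() callback. Below is a hedged sketch that re-arms the bulk-in URB and reuses the generic completion handler declared later in this header; example_open() itself is hypothetical, and real drivers add locking and error handling:

	#include <linux/gfp.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	/* Hypothetical open(): point the port's read URB at its bulk-in
	 * endpoint and submit it, so data flows as soon as the tty opens. */
	static int example_open(struct usb_serial_port *port, struct file *filp)
	{
		struct usb_serial *serial = port->serial;

		usb_fill_bulk_urb(port->read_urb, serial->dev,
				  usb_rcvbulkpipe(serial->dev,
						  port->bulk_in_endpointAddress),
				  port->bulk_in_buffer, port->bulk_in_size,
				  usb_serial_generic_read_bulk_callback, port);
		return usb_submit_urb(port->read_urb, GFP_KERNEL);
	}
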
+
+/* get and set the port private data pointer helper functions */
+static inline void *usb_get_serial_port_data (struct usb_serial_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+
+static inline void usb_set_serial_port_data (struct usb_serial_port *port, void *data)
+{
+ dev_set_drvdata(&port->dev, data);
+}
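
A minimal sketch of how a driver might use these helpers to hang per-port state off each port in its attach() callback; struct example_port_priv and example_attach() are made up for illustration, and real code unwinds the earlier allocations on failure:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/usb/serial.h>

	struct example_port_priv {
		int open_count;		/* whatever the driver needs per port */
	};

	static int example_attach(struct usb_serial *serial)
	{
		int i;

		for (i = 0; i < serial->num_ports; ++i) {
			struct example_port_priv *priv;

			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (!priv)
				return -ENOMEM;
			usb_set_serial_port_data(serial->port[i], priv);
		}
		return 0;
	}
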
+
+/**
+ * struct usb_serial - structure used by the usb-serial core for a device
+ * @dev: pointer to the struct usb_device for this device
+ * @type: pointer to the struct usb_serial_driver for this device
+ * @interface: pointer to the struct usb_interface for this device
+ * @minor: the starting minor number for this device
+ * @num_ports: the number of ports this device has
+ * @num_interrupt_in: number of interrupt in endpoints we have
+ * @num_interrupt_out: number of interrupt out endpoints we have
+ * @num_bulk_in: number of bulk in endpoints we have
+ * @num_bulk_out: number of bulk out endpoints we have
+ * @port: array of struct usb_serial_port structures for the different ports.
+ * @private: place to put any driver specific information that is needed. The
+ * usb-serial driver is required to manage this data; the usb-serial core
+ * will not touch it. Use usb_get_serial_data() and
+ * usb_set_serial_data() to access this.
+ */
+struct usb_serial {
+ struct usb_device * dev;
+ struct usb_serial_driver * type;
+ struct usb_interface * interface;
+ unsigned char minor;
+ unsigned char num_ports;
+ unsigned char num_port_pointers;
+ char num_interrupt_in;
+ char num_interrupt_out;
+ char num_bulk_in;
+ char num_bulk_out;
+ struct usb_serial_port * port[MAX_NUM_PORTS];
+ struct kref kref;
+ void * private;
+};
+#define to_usb_serial(d) container_of(d, struct usb_serial, kref)
+
+#define NUM_DONT_CARE (-1)
+
+/* get and set the serial private data pointer helper functions */
+static inline void *usb_get_serial_data (struct usb_serial *serial)
+{
+ return serial->private;
+}
+
+static inline void usb_set_serial_data (struct usb_serial *serial, void *data)
+{
+ serial->private = data;
+}
+
+/**
+ * struct usb_serial_driver - describes a USB serial driver
+ * @description: pointer to a string that describes this driver. This string
+ * is used in the syslog messages when a device is inserted or removed.
+ * @id_table: pointer to a list of usb_device_id structures that define all
+ * of the devices this structure can support.
+ * @num_interrupt_in: the number of interrupt in endpoints this device will
+ * have.
+ * @num_interrupt_out: the number of interrupt out endpoints this device will
+ * have.
+ * @num_bulk_in: the number of bulk in endpoints this device will have.
+ * @num_bulk_out: the number of bulk out endpoints this device will have.
+ * @num_ports: the number of different ports this device will have.
+ * @calc_num_ports: pointer to a function to determine how many ports this
+ * device has dynamically. It will be called after the probe()
+ * callback is called, but before attach()
+ * @probe: pointer to the driver's probe function.
+ * This will be called when the device is inserted into the system,
+ * but before the device has been fully initialized by the usb_serial
+ * subsystem. Use this function to download any firmware to the device,
+ * or any other early initialization that might be needed.
+ * Return 0 to continue on with the initialization sequence. Anything
+ * else will abort it.
+ * @attach: pointer to the driver's attach function.
+ * This will be called when the struct usb_serial structure is fully
+ * set up. Do any local initialization of the device, or any private
+ * memory structure allocation at this point in time.
+ * @shutdown: pointer to the driver's shutdown function. This will be
+ * called when the device is removed from the system.
+ *
+ * This structure defines a USB serial driver. It provides all of
+ * the information that the USB serial core code needs. If the function
+ * pointers are defined, then the USB serial core code will call them when
+ * the corresponding tty port functions are called. If they are not
+ * defined, the generic serial functions will be used instead.
+ *
+ * The driver.owner field should be set to the module owner of this driver.
+ * The driver.name field should be set to the name of this driver (remember
+ * it will show up in sysfs, so it needs to be short and to the point.
+ * Using the module name is a good idea.)
+ */
+struct usb_serial_driver {
+ const char *description;
+ const struct usb_device_id *id_table;
+ char num_interrupt_in;
+ char num_interrupt_out;
+ char num_bulk_in;
+ char num_bulk_out;
+ char num_ports;
+
+ struct list_head driver_list;
+ struct device_driver driver;
+
+ int (*probe) (struct usb_serial *serial, const struct usb_device_id *id);
+ int (*attach) (struct usb_serial *serial);
+ int (*calc_num_ports) (struct usb_serial *serial);
+
+ void (*shutdown) (struct usb_serial *serial);
+
+ int (*port_probe) (struct usb_serial_port *port);
+ int (*port_remove) (struct usb_serial_port *port);
+
+ /* serial function calls */
+ int (*open) (struct usb_serial_port *port, struct file * filp);
+ void (*close) (struct usb_serial_port *port, struct file * filp);
+ int (*write) (struct usb_serial_port *port, const unsigned char *buf, int count);
+ int (*write_room) (struct usb_serial_port *port);
+ int (*ioctl) (struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg);
+ void (*set_termios) (struct usb_serial_port *port, struct termios * old);
+ void (*break_ctl) (struct usb_serial_port *port, int break_state);
+ int (*chars_in_buffer) (struct usb_serial_port *port);
+ void (*throttle) (struct usb_serial_port *port);
+ void (*unthrottle) (struct usb_serial_port *port);
+ int (*tiocmget) (struct usb_serial_port *port, struct file *file);
+ int (*tiocmset) (struct usb_serial_port *port, struct file *file, unsigned int set, unsigned int clear);
+
+ void (*read_int_callback)(struct urb *urb, struct pt_regs *regs);
+ void (*write_int_callback)(struct urb *urb, struct pt_regs *regs);
+ void (*read_bulk_callback)(struct urb *urb, struct pt_regs *regs);
+ void (*write_bulk_callback)(struct urb *urb, struct pt_regs *regs);
+};
+#define to_usb_serial_driver(d) container_of(d, struct usb_serial_driver, driver)
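
Putting the pieces together, a skeleton driver built on this structure might look as follows. All of the names, the ID table contents and the endpoint/port counts are placeholders, and the companion struct usb_driver (whose probe and disconnect callbacks would point at usb_serial_probe() and usb_serial_disconnect()) is left out to keep the sketch short:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/product ids */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);

	static struct usb_serial_driver example_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "example",	/* short; shows up in sysfs */
		},
		.description	= "Example USB serial adapter",
		.id_table	= example_id_table,
		.num_interrupt_in = NUM_DONT_CARE,
		.num_interrupt_out = NUM_DONT_CARE,
		.num_bulk_in	= 1,
		.num_bulk_out	= 1,
		.num_ports	= 1,
	};

	static int __init example_init(void)
	{
		return usb_serial_register(&example_device);
	}

	static void __exit example_exit(void)
	{
		usb_serial_deregister(&example_device);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

A real driver would also fill in callbacks such as .open and .attach (see the sketches above) before registering.
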
+
+extern int usb_serial_register(struct usb_serial_driver *driver);
+extern void usb_serial_deregister(struct usb_serial_driver *driver);
+extern void usb_serial_port_softint(struct usb_serial_port *port);
+
+extern int usb_serial_probe(struct usb_interface *iface, const struct usb_device_id *id);
+extern void usb_serial_disconnect(struct usb_interface *iface);
+
+extern int ezusb_writememory (struct usb_serial *serial, int address, unsigned char *data, int length, __u8 bRequest);
+extern int ezusb_set_reset (struct usb_serial *serial, unsigned char reset_bit);
+
+/* USB Serial console functions */
+#ifdef CONFIG_USB_SERIAL_CONSOLE
+extern void usb_serial_console_init (int debug, int minor);
+extern void usb_serial_console_exit (void);
+extern void usb_serial_console_disconnect(struct usb_serial *serial);
+#else
+static inline void usb_serial_console_init (int debug, int minor) { }
+static inline void usb_serial_console_exit (void) { }
+static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
+#endif
+
+/* Functions needed by other parts of the usbserial core */
+extern struct usb_serial *usb_serial_get_by_index (unsigned int minor);
+extern void usb_serial_put(struct usb_serial *serial);
+extern int usb_serial_generic_open (struct usb_serial_port *port, struct file *filp);
+extern int usb_serial_generic_write (struct usb_serial_port *port, const unsigned char *buf, int count);
+extern void usb_serial_generic_close (struct usb_serial_port *port, struct file *filp);
+extern int usb_serial_generic_write_room (struct usb_serial_port *port);
+extern int usb_serial_generic_chars_in_buffer (struct usb_serial_port *port);
+extern void usb_serial_generic_read_bulk_callback (struct urb *urb, struct pt_regs *regs);
+extern void usb_serial_generic_write_bulk_callback (struct urb *urb, struct pt_regs *regs);
+extern void usb_serial_generic_shutdown (struct usb_serial *serial);
+extern int usb_serial_generic_register (int debug);
+extern void usb_serial_generic_deregister (void);
+
+extern int usb_serial_bus_register (struct usb_serial_driver *device);
+extern void usb_serial_bus_deregister (struct usb_serial_driver *device);
+
+extern struct usb_serial_driver usb_serial_generic_device;
+extern struct bus_type usb_serial_bus_type;
+extern struct tty_driver *usb_serial_tty_driver;
+
+static inline void usb_serial_debug_data(int debug,
+ struct device *dev,
+ const char *function, int size,
+ const unsigned char *data)
+{
+ int i;
+
+ if (debug) {
+ dev_printk(KERN_DEBUG, dev, "%s - length = %d, data = ", function, size);
+ for (i = 0; i < size; ++i)
+ printk ("%.2x ", data[i]);
+ printk ("\n");
+ }
+}
+
+/* Use our own dbg macro */
+#undef dbg
+#define dbg(format, arg...) do { if (debug) printk(KERN_DEBUG "%s: " format "\n" , __FILE__ , ## arg); } while (0)
+
+
+
+#endif /* ifdef __LINUX_USB_SERIAL_H */
+
diff --git a/include/linux/usb_ch9.h b/include/linux/usb_ch9.h
index a2aacfc7af2..c720d107ff2 100644
--- a/include/linux/usb_ch9.h
+++ b/include/linux/usb_ch9.h
@@ -51,6 +51,9 @@
#define USB_RECIP_INTERFACE 0x01
#define USB_RECIP_ENDPOINT 0x02
#define USB_RECIP_OTHER 0x03
+/* From Wireless USB 1.0 */
+#define USB_RECIP_PORT 0x04
+#define USB_RECIP_RPIPE 0x05
/*
* Standard requests, for the bRequest field of a SETUP packet.
@@ -73,7 +76,9 @@
#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
#define USB_REQ_GET_ENCRYPTION 0x0E
+#define USB_REQ_RPIPE_ABORT 0x0E
#define USB_REQ_SET_HANDSHAKE 0x0F
+#define USB_REQ_RPIPE_RESET 0x0F
#define USB_REQ_GET_HANDSHAKE 0x10
#define USB_REQ_SET_CONNECTION 0x11
#define USB_REQ_SET_SECURITY_DATA 0x12
@@ -159,6 +164,8 @@ struct usb_ctrlrequest {
#define USB_DT_BOS 0x0f
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
+#define USB_DT_WIRE_ADAPTER 0x21
+#define USB_DT_RPIPE 0x22
/* conventional codes for class-specific descriptors */
#define USB_DT_CS_DEVICE 0x21
diff --git a/include/linux/usb_gadget.h b/include/linux/usb_gadget.h
index 1d78870ed8a..e17186dbcdc 100644
--- a/include/linux/usb_gadget.h
+++ b/include/linux/usb_gadget.h
@@ -872,9 +872,9 @@ int usb_gadget_config_buf(const struct usb_config_descriptor *config,
/* utility wrapping a simple endpoint selection policy */
extern struct usb_ep *usb_ep_autoconfig (struct usb_gadget *,
- struct usb_endpoint_descriptor *) __init;
+ struct usb_endpoint_descriptor *) __devinit;
-extern void usb_ep_autoconfig_reset (struct usb_gadget *) __init;
+extern void usb_ep_autoconfig_reset (struct usb_gadget *) __devinit;
#endif /* __KERNEL__ */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 608487a62c9..e7fc5fed5b9 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -43,6 +43,10 @@
/* Need delay after Command phase */ \
US_FLAG(NO_WP_DETECT, 0x00000200) \
/* Don't check for write-protect */ \
+ US_FLAG(MAX_SECTORS_64, 0x00000400) \
+ /* Sets max_sectors to 64 */ \
+ US_FLAG(IGNORE_DEVICE, 0x00000800) \
+ /* Don't claim device */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index dc7c621e464..46919f9f5eb 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
-#include <linux/version.h>
+#include <linux/utsrelease.h>
#include <linux/module.h>
/* Simply sanity version stamp for modules. */
diff --git a/include/linux/videodev.h b/include/linux/videodev.h
index 41bc7e9603c..8dba97a291f 100644
--- a/include/linux/videodev.h
+++ b/include/linux/videodev.h
@@ -12,10 +12,10 @@
#ifndef __LINUX_VIDEODEV_H
#define __LINUX_VIDEODEV_H
-#define HAVE_V4L1 1
-
#include <linux/videodev2.h>
+#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__)
+
struct video_capability
{
char name[32];
@@ -336,6 +336,8 @@ struct video_code
#define VID_HARDWARE_SN9C102 38
#define VID_HARDWARE_ARV 39
+#endif /* CONFIG_VIDEO_V4L1_COMPAT */
+
#endif /* __LINUX_VIDEODEV_H */
/*
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index a62673dad76..e3715d77419 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -22,8 +22,6 @@
#endif
#include <linux/types.h>
-#define HAVE_V4L2 1
-
/*
* Common stuff for both V4L1 and V4L2
* Moved from videodev.h
@@ -716,7 +714,7 @@ struct v4l2_ext_control
__s64 value64;
void *reserved;
};
-};
+} __attribute__ ((packed));
struct v4l2_ext_controls
{
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index f6024ab4eff..71b6363caaa 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -11,6 +11,7 @@ struct vm_area_struct;
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 3e0daf54133..2d9b1b60798 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,23 +41,23 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states.event[item])++;
+ __get_cpu_var(vm_event_states).event[item]++;
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states.event[item])++;
+ get_cpu_var(vm_event_states).event[item]++;
put_cpu();
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states.event[item]) += delta;
+ __get_cpu_var(vm_event_states).event[item] += delta;
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states.event[item])++;
+ get_cpu_var(vm_event_states).event[item] += delta;
put_cpu();
}
@@ -186,11 +186,16 @@ static inline void __mod_zone_page_state(struct zone *zone,
zone_page_state_add(delta, zone, item);
}
+static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ atomic_long_inc(&zone->vm_stat[item]);
+ atomic_long_inc(&vm_stat[item]);
+}
+
static inline void __inc_zone_page_state(struct page *page,
enum zone_stat_item item)
{
- atomic_long_inc(&page_zone(page)->vm_stat[item]);
- atomic_long_inc(&vm_stat[item]);
+ __inc_zone_state(page_zone(page), item);
}
static inline void __dec_zone_page_state(struct page *page,
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 9f95b0bea5b..ba806e8711b 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -1,6 +1,16 @@
#ifndef _LINUX_VT_H
#define _LINUX_VT_H
+/*
+ * These constants are also useful for user-level apps (e.g., VC
+ * resizing).
+ */
+#define MIN_NR_CONSOLES 1 /* must be at least 1 */
+#define MAX_NR_CONSOLES 63 /* serial lines start at 64 */
+#define MAX_NR_USER_CONSOLES 63 /* must be root to allocate above this */
+ /* Note: the ioctl VT_GETSTATE does not work for
+ consoles 16 and higher (since it returns a short) */
+
/* 0x56 is 'V', to avoid collision with termios and kd */
#define VT_OPENQRY 0x5600 /* find available vt */
@@ -50,5 +60,6 @@ struct vt_consize {
#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */
#define VT_LOCKSWITCH 0x560B /* disallow vt switching */
#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */
+#define VT_GETHIFONTMASK 0x560D /* return hi font mask */
#endif /* _LINUX_VT_H */
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 940d0261a54..918a29763ae 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -26,6 +26,7 @@
extern void kd_mksound(unsigned int hz, unsigned int ticks);
extern int kbd_rate(struct kbd_repeat *rep);
+extern int fg_console, last_console, want_console;
/* console.c */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 544e855c7c0..b3b9048421d 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -68,7 +68,7 @@ struct task_struct;
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
@@ -77,11 +77,7 @@ struct task_struct;
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
-static inline void init_waitqueue_head(wait_queue_head_t *q)
-{
- spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->task_list);
-}
+extern void init_waitqueue_head(wait_queue_head_t *q);
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 957c21c16d6..9bca3539a1e 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq);
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct work_struct *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern int FASTCALL(schedule_work(struct work_struct *work));