Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 3
-rw-r--r--  include/linux/adb.h | 1
-rw-r--r--  include/linux/bio.h | 130
-rw-r--r--  include/linux/blkdev.h | 167
-rw-r--r--  include/linux/blktrace_api.h | 1
-rw-r--r--  include/linux/brcmphy.h | 6
-rw-r--r--  include/linux/configfs.h | 3
-rw-r--r--  include/linux/crc-t10dif.h | 8
-rw-r--r--  include/linux/crypto.h | 48
-rw-r--r--  include/linux/cyclades.h | 13
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/dccp.h | 6
-rw-r--r--  include/linux/debugfs.h | 2
-rw-r--r--  include/linux/device.h | 9
-rw-r--r--  include/linux/dm9000.h | 1
-rw-r--r--  include/linux/elf.h | 1
-rw-r--r--  include/linux/ethtool.h | 33
-rw-r--r--  include/linux/firmware.h | 23
-rw-r--r--  include/linux/freezer.h | 10
-rw-r--r--  include/linux/fs.h | 11
-rw-r--r--  include/linux/ftrace.h | 144
-rw-r--r--  include/linux/generic_serial.h | 8
-rw-r--r--  include/linux/genhd.h | 12
-rw-r--r--  include/linux/hayesesp.h | 9
-rw-r--r--  include/linux/hdlc.h | 7
-rw-r--r--  include/linux/i2c-algo-pcf.h | 8
-rw-r--r--  include/linux/i2c-id.h | 11
-rw-r--r--  include/linux/i2c.h | 46
-rw-r--r--  include/linux/i2c/at24.h | 28
-rw-r--r--  include/linux/ide.h | 129
-rw-r--r--  include/linux/ieee80211.h | 499
-rw-r--r--  include/linux/if_bridge.h | 2
-rw-r--r--  include/linux/if_packet.h | 24
-rw-r--r--  include/linux/if_ppp.h | 2
-rw-r--r--  include/linux/if_tun.h | 34
-rw-r--r--  include/linux/if_vlan.h | 238
-rw-r--r--  include/linux/igmp.h | 1
-rw-r--r--  include/linux/ihex.h | 74
-rw-r--r--  include/linux/inet.h | 7
-rw-r--r--  include/linux/init_task.h | 4
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/iocontext.h | 18
-rw-r--r--  include/linux/ioport.h | 5
-rw-r--r--  include/linux/ip6_tunnel.h | 4
-rw-r--r--  include/linux/ipv6.h | 4
-rw-r--r--  include/linux/irq.h | 9
-rw-r--r--  include/linux/irqflags.h | 13
-rw-r--r--  include/linux/istallion.h | 6
-rw-r--r--  include/linux/jbd2.h | 73
-rw-r--r--  include/linux/kernel.h | 8
-rw-r--r--  include/linux/kprobes.h | 4
-rw-r--r--  include/linux/kvm.h | 33
-rw-r--r--  include/linux/kvm_host.h | 11
-rw-r--r--  include/linux/libata.h | 55
-rw-r--r--  include/linux/linkage.h | 2
-rw-r--r--  include/linux/list.h | 367
-rw-r--r--  include/linux/lm_interface.h | 6
-rw-r--r--  include/linux/lockd/lockd.h | 8
-rw-r--r--  include/linux/lockdep.h | 11
-rw-r--r--  include/linux/marker.h | 40
-rw-r--r--  include/linux/mfd/asic3.h | 185
-rw-r--r--  include/linux/mlx4/device.h | 3
-rw-r--r--  include/linux/mm.h | 1
-rw-r--r--  include/linux/mman.h | 29
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/host.h | 32
-rw-r--r--  include/linux/mmc/mmc.h | 1
-rw-r--r--  include/linux/mmc/sdio_func.h | 21
-rw-r--r--  include/linux/mmiotrace.h | 85
-rw-r--r--  include/linux/mod_devicetable.h | 9
-rw-r--r--  include/linux/mpage.h | 10
-rw-r--r--  include/linux/mroute.h | 28
-rw-r--r--  include/linux/mroute6.h | 35
-rw-r--r--  include/linux/mv643xx_eth.h | 65
-rw-r--r--  include/linux/net.h | 4
-rw-r--r--  include/linux/netdevice.h | 330
-rw-r--r--  include/linux/netfilter/nfnetlink_conntrack.h | 10
-rw-r--r--  include/linux/netfilter/xt_string.h | 15
-rw-r--r--  include/linux/netfilter_bridge/ebt_ip6.h | 40
-rw-r--r--  include/linux/netfilter_bridge/ebt_log.h | 3
-rw-r--r--  include/linux/netfilter_ipv4.h | 1
-rw-r--r--  include/linux/netfilter_ipv6.h | 5
-rw-r--r--  include/linux/netlink.h | 2
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs_fs.h | 10
-rw-r--r--  include/linux/nfs_iostat.h | 119
-rw-r--r--  include/linux/nfs_page.h | 9
-rw-r--r--  include/linux/nfs_xdr.h | 3
-rw-r--r--  include/linux/nfsd/nfsd.h | 27
-rw-r--r--  include/linux/nfsd/state.h | 2
-rw-r--r--  include/linux/nl80211.h | 13
-rw-r--r--  include/linux/notifier.h | 1
-rw-r--r--  include/linux/of_device.h | 3
-rw-r--r--  include/linux/pci.h | 57
-rw-r--r--  include/linux/pci_hotplug.h | 14
-rw-r--r--  include/linux/pci_ids.h | 6
-rw-r--r--  include/linux/pci_regs.h | 1
-rw-r--r--  include/linux/percpu_counter.h | 12
-rw-r--r--  include/linux/pkt_cls.h | 1
-rw-r--r--  include/linux/pkt_sched.h | 29
-rw-r--r--  include/linux/platform_device.h | 1
-rw-r--r--  include/linux/pm.h | 314
-rw-r--r--  include/linux/pm_wakeup.h | 28
-rw-r--r--  include/linux/pnp.h | 146
-rw-r--r--  include/linux/ppp-comp.h | 2
-rw-r--r--  include/linux/ppp_defs.h | 2
-rw-r--r--  include/linux/preempt.h | 34
-rw-r--r--  include/linux/ptrace.h | 8
-rw-r--r--  include/linux/pwm.h | 31
-rw-r--r--  include/linux/pwm_backlight.h | 17
-rw-r--r--  include/linux/rcuclassic.h | 3
-rw-r--r--  include/linux/rculist.h | 369
-rw-r--r--  include/linux/rcupdate.h | 26
-rw-r--r--  include/linux/rcupreempt.h | 42
-rw-r--r--  include/linux/rfkill.h | 46
-rw-r--r--  include/linux/rtnetlink.h | 1
-rw-r--r--  include/linux/sched.h | 104
-rw-r--r--  include/linux/security.h | 49
-rw-r--r--  include/linux/seq_file_net.h | 3
-rw-r--r--  include/linux/serial_core.h | 27
-rw-r--r--  include/linux/skbuff.h | 27
-rw-r--r--  include/linux/smc911x.h | 12
-rw-r--r--  include/linux/smp.h | 46
-rw-r--r--  include/linux/smp_lock.h | 13
-rw-r--r--  include/linux/socket.h | 6
-rw-r--r--  include/linux/sonet.h | 2
-rw-r--r--  include/linux/spi/mmc_spi.h | 9
-rw-r--r--  include/linux/ssb/ssb.h | 144
-rw-r--r--  include/linux/stallion.h | 6
-rw-r--r--  include/linux/sunrpc/auth_gss.h | 2
-rw-r--r--  include/linux/sunrpc/clnt.h | 7
-rw-r--r--  include/linux/sunrpc/gss_api.h | 2
-rw-r--r--  include/linux/sunrpc/gss_krb5.h | 3
-rw-r--r--  include/linux/sunrpc/sched.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 7
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 36
-rw-r--r--  include/linux/sunrpc/svcauth_gss.h | 3
-rw-r--r--  include/linux/suspend.h | 14
-rw-r--r--  include/linux/tcp.h | 56
-rw-r--r--  include/linux/textsearch.h | 13
-rw-r--r--  include/linux/tipc_config.h | 10
-rw-r--r--  include/linux/topology.h | 13
-rw-r--r--  include/linux/tty.h | 204
-rw-r--r--  include/linux/tty_ldisc.h | 7
-rw-r--r--  include/linux/udp.h | 6
-rw-r--r--  include/linux/usb/rndis_host.h | 3
-rw-r--r--  include/linux/videodev2.h | 14
-rw-r--r--  include/linux/wanrouter.h | 2
-rw-r--r--  include/linux/wireless.h | 30
-rw-r--r--  include/linux/writeback.h | 3
-rw-r--r--  include/linux/xfrm.h | 1
151 files changed, 4115 insertions(+), 1514 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0601075d09a..a1717763937 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_check_mem_region(resource_size_t start, resource_size_t n,
const char *name);
+#ifdef CONFIG_PM_SLEEP
+void __init acpi_old_suspend_ordering(void);
+#endif /* CONFIG_PM_SLEEP */
#else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void)
diff --git a/include/linux/adb.h b/include/linux/adb.h
index 64d8878e144..63bca502fa5 100644
--- a/include/linux/adb.h
+++ b/include/linux/adb.h
@@ -84,7 +84,6 @@ enum adb_message {
ADB_MSG_PRE_RESET, /* Called before resetting the bus */
ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
};
-extern struct adb_driver *adb_controller;
extern struct blocking_notifier_head adb_client_list;
int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 61c15eaf3fb..0933a14e641 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -64,6 +64,7 @@ struct bio_vec {
struct bio_set;
struct bio;
+struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
@@ -112,6 +113,9 @@ struct bio {
atomic_t bi_cnt; /* pin count */
void *bi_private;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
+#endif
bio_destructor_t *bi_destructor; /* destructor */
};
@@ -271,6 +275,29 @@ static inline void *bio_data(struct bio *bio)
*/
#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+/*
+ * bio integrity payload
+ */
+struct bio_integrity_payload {
+ struct bio *bip_bio; /* parent bio */
+ struct bio_vec *bip_vec; /* integrity data vector */
+
+ sector_t bip_sector; /* virtual start sector */
+
+ void *bip_buf; /* generated integrity data */
+ bio_end_io_t *bip_end_io; /* saved I/O completion fn */
+
+ int bip_error; /* saved I/O error */
+ unsigned int bip_size;
+
+ unsigned short bip_pool; /* pool the ivec came from */
+ unsigned short bip_vcnt; /* # of integrity bio_vecs */
+ unsigned short bip_idx; /* current bip_vec index */
+
+ struct work_struct bip_work; /* I/O completion */
+};
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
* A bio_pair is used when we need to split a bio.
@@ -283,10 +310,14 @@ static inline void *bio_data(struct bio *bio)
* in bio2.bi_private
*/
struct bio_pair {
- struct bio bio1, bio2;
- struct bio_vec bv1, bv2;
- atomic_t cnt;
- int error;
+ struct bio bio1, bio2;
+ struct bio_vec bv1, bv2;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ struct bio_integrity_payload bip1, bip2;
+ struct bio_vec iv1, iv2;
+#endif
+ atomic_t cnt;
+ int error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
int first_sectors);
@@ -333,6 +364,39 @@ extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
int, int);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
+extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern unsigned int bvec_nr_vecs(unsigned short idx);
+
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+#define BIO_POOL_SIZE 2
+#define BIOVEC_NR_POOLS 6
+
+struct bio_set {
+ mempool_t *bio_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ mempool_t *bio_integrity_pool;
+#endif
+ mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+};
+
+struct biovec_slab {
+ int nr_vecs;
+ char *name;
+ struct kmem_cache *slab;
+};
+
+extern struct bio_set *fs_bio_set;
+
+/*
+ * a small number of entries is fine, not going to be performance critical.
+ * basically we just need to survive
+ */
+#define BIO_SPLIT_ENTRIES 2
#ifdef CONFIG_HIGHMEM
/*
@@ -381,5 +445,63 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
+#define bip_vec(bip) bip_vec_idx(bip, 0)
+
+#define __bip_for_each_vec(bvl, bip, i, start_idx) \
+ for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx); \
+ i < (bip)->bip_vcnt; \
+ bvl++, i++)
+
+#define bip_for_each_vec(bvl, bip, i) \
+ __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+
+static inline int bio_integrity(struct bio *bio)
+{
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ return bio->bi_integrity != NULL;
+#else
+ return 0;
+#endif
+}
+
+extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
+extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
+extern void bio_integrity_free(struct bio *, struct bio_set *);
+extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
+extern int bio_integrity_enabled(struct bio *bio);
+extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
+extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
+extern int bio_integrity_prep(struct bio *);
+extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_advance(struct bio *, unsigned int);
+extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
+extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
+extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *);
+extern int bioset_integrity_create(struct bio_set *, int);
+extern void bioset_integrity_free(struct bio_set *);
+extern void bio_integrity_init_slab(void);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define bio_integrity(a) (0)
+#define bioset_integrity_create(a, b) (0)
+#define bio_integrity_prep(a) (0)
+#define bio_integrity_enabled(a) (0)
+#define bio_integrity_clone(a, b, c) (0)
+#define bioset_integrity_free(a) do { } while (0)
+#define bio_integrity_free(a, b) do { } while (0)
+#define bio_integrity_endio(a, b) do { } while (0)
+#define bio_integrity_advance(a, b) do { } while (0)
+#define bio_integrity_trim(a, b, c) do { } while (0)
+#define bio_integrity_split(a, b, c) do { } while (0)
+#define bio_integrity_set_tag(a, b, c) do { } while (0)
+#define bio_integrity_get_tag(a, b, c) do { } while (0)
+#define bio_integrity_init_slab(a) do { } while (0)
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */
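The bio.h hunks above add a per-bio integrity payload and the bip_for_each_vec() iterator. A minimal sketch of walking a bio's integrity segments follows; the function name and the debug printk are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/bio.h>

#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Walk the integrity segments attached to a bio, if any. */
static void example_dump_integrity(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio->bi_integrity;
	struct bio_vec *iv;
	unsigned int i;

	if (!bio_integrity(bio))
		return;

	bip_for_each_vec(iv, bip, i)
		printk(KERN_DEBUG "bip segment %u: len %u, offset %u\n",
		       i, iv->bv_len, iv->bv_offset);
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */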
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d2a1b71e93c..88d68081a0f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -23,7 +23,6 @@
struct scsi_ioctl_command;
struct request_queue;
-typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
@@ -34,12 +33,6 @@ struct sg_io_hdr;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
-int put_io_context(struct io_context *ioc);
-void exit_io_context(void);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
-
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
@@ -113,6 +106,7 @@ enum rq_flag_bits {
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
+ __REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_NR_BITS, /* stops here */
};
@@ -135,6 +129,7 @@ enum rq_flag_bits {
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define BLK_MAX_CDB 16
@@ -259,7 +254,14 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
-typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+struct bvec_merge_data {
+ struct block_device *bi_bdev;
+ sector_t bi_sector;
+ unsigned bi_size;
+ unsigned long bi_rw;
+};
+typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
+ struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
@@ -426,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
__set_bit(flag, &q->queue_flags);
}
+static inline int queue_flag_test_and_clear(unsigned int flag,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ if (test_bit(flag, &q->queue_flags)) {
+ __clear_bit(flag, &q->queue_flags);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ if (!test_bit(flag, &q->queue_flags)) {
+ __set_bit(flag, &q->queue_flags);
+ return 0;
+ }
+
+ return 1;
+}
+
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
@@ -623,7 +651,6 @@ extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
-extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -676,7 +703,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
-extern int blk_verify_command(unsigned char *, int);
extern void blk_unplug(struct request_queue *q);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -749,6 +775,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
+extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
@@ -802,6 +829,15 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, sector_t *);
+/*
+* command filter functions
+*/
+extern int blk_verify_command(struct file *file, unsigned char *cmd);
+extern int blk_cmd_filter_verify_command(struct blk_scsi_cmd_filter *filter,
+ unsigned char *cmd, mode_t *f_mode);
+extern int blk_register_filter(struct gendisk *disk);
+extern void blk_unregister_filter(struct gendisk *disk);
+
#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
@@ -865,28 +901,119 @@ void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
-#else /* CONFIG_BLOCK */
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
+#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
+#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
-static inline long nr_blockdev_pages(void)
+struct blk_integrity_exchg {
+ void *prot_buf;
+ void *data_buf;
+ sector_t sector;
+ unsigned int data_size;
+ unsigned short sector_size;
+ const char *disk_name;
+};
+
+typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
+typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
+typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
+typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+
+struct blk_integrity {
+ integrity_gen_fn *generate_fn;
+ integrity_vrfy_fn *verify_fn;
+ integrity_set_tag_fn *set_tag_fn;
+ integrity_get_tag_fn *get_tag_fn;
+
+ unsigned short flags;
+ unsigned short tuple_size;
+ unsigned short sector_size;
+ unsigned short tag_size;
+
+ const char *name;
+
+ struct kobject kobj;
+};
+
+extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_unregister(struct gendisk *);
+extern int blk_integrity_compare(struct block_device *, struct block_device *);
+extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request *);
+
+static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
{
+ if (bi)
+ return bi->tuple_size;
+
return 0;
}
-static inline void exit_io_context(void)
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
+ return bdev->bd_disk->integrity;
}
-struct io_context;
-static inline int put_io_context(struct io_context *ioc)
+static inline unsigned int bdev_get_tag_size(struct block_device *bdev)
{
- return 1;
+ struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+ if (bi)
+ return bi->tag_size;
+
+ return 0;
+}
+
+static inline int bdev_integrity_enabled(struct block_device *bdev, int rw)
+{
+ struct blk_integrity *bi = bdev_get_integrity(bdev);
+
+ if (bi == NULL)
+ return 0;
+
+ if (rw == READ && bi->verify_fn != NULL &&
+ (bi->flags & INTEGRITY_FLAG_READ))
+ return 1;
+
+ if (rw == WRITE && bi->generate_fn != NULL &&
+ (bi->flags & INTEGRITY_FLAG_WRITE))
+ return 1;
+
+ return 0;
}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ if (rq->bio == NULL)
+ return 0;
+
+ return bio_integrity(rq->bio);
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+#define blk_integrity_rq(rq) (0)
+#define blk_rq_count_integrity_sg(a) (0)
+#define blk_rq_map_integrity_sg(a, b) (0)
+#define bdev_get_integrity(a) (0)
+#define bdev_get_tag_size(a) (0)
+#define blk_integrity_compare(a, b) (0)
+#define blk_integrity_register(a, b) (0)
+#define blk_integrity_unregister(a) do { } while (0);
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#else /* CONFIG_BLOCK */
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
#endif /* CONFIG_BLOCK */
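The blkdev.h hunks above introduce struct blk_integrity and its registration entry points. A hedged sketch of how a disk driver could describe and register a protection profile follows; the profile name, tuple size and callback bodies are placeholders, not taken from the patch:

#include <linux/blkdev.h>
#include <linux/genhd.h>

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void example_generate(struct blk_integrity_exchg *bix)
{
	/* fill bix->prot_buf with protection data for bix->data_buf */
}

static int example_verify(struct blk_integrity_exchg *bix)
{
	/* return 0 if bix->prot_buf matches bix->data_buf, else a negative errno */
	return 0;
}

static struct blk_integrity example_profile = {
	.name		= "EXAMPLE-DIF",
	.generate_fn	= example_generate,
	.verify_fn	= example_verify,
	.tuple_size	= 8,
	.flags		= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
};

/* Once the gendisk has been set up:
 *	blk_integrity_register(disk, &example_profile);
 */
#endif /* CONFIG_BLK_DEV_INTEGRITY */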
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index e3ef903aae8..d084b8d227a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -129,6 +129,7 @@ struct blk_trace {
u32 dev;
struct dentry *dir;
struct dentry *dropped_file;
+ struct dentry *msg_file;
atomic_t dropped;
};
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
new file mode 100644
index 00000000000..9b64b6d6787
--- /dev/null
+++ b/include/linux/brcmphy.h
@@ -0,0 +1,6 @@
+#define PHY_BRCM_WIRESPEED_ENABLE 0x00000001
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000002
+#define PHY_BRCM_APD_CLK125_ENABLE 0x00000004
+#define PHY_BRCM_STD_IBND_DISABLE 0x00000008
+#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00000010
+#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00000020
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 3ae65b1bf90..d62c19ff041 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -148,7 +148,8 @@ struct configfs_attribute {
* items. If the item is a group, it may support mkdir(2).
* Groups supply one of make_group() and make_item(). If the
* group supports make_group(), one can create group children. If it
- * supports make_item(), one can create config_item children. If it has
+ * supports make_item(), one can create config_item children. make_group()
+ * and make_item() return ERR_PTR() on errors. If it has
* default_groups on group->default_groups, it has automatically created
* group children. default_groups may coexist alongsize make_group() or
* make_item(), but if the group wishes to have only default_groups
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
new file mode 100644
index 00000000000..a9c96d865ee
--- /dev/null
+++ b/include/linux/crc-t10dif.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_CRC_T10DIF_H
+#define _LINUX_CRC_T10DIF_H
+
+#include <linux/types.h>
+
+__u16 crc_t10dif(unsigned char const *, size_t);
+
+#endif
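crc_t10dif() above maps a buffer and length to the 16-bit T10 DIF CRC; a trivial call sketch, with the 512-byte sector size as an assumed example:

#include <linux/crc-t10dif.h>

/* Guard tag over one 512-byte sector worth of data. */
static __u16 example_guard_tag(const unsigned char *buf)
{
	return crc_t10dif(buf, 512);
}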
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 425824bd49f..c43dc47fdf7 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -30,15 +30,17 @@
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
-#define CRYPTO_ALG_TYPE_HASH 0x00000003
+#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
+#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000008
-#define CRYPTO_ALG_TYPE_AEAD 0x00000009
+#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
+#define CRYPTO_ALG_TYPE_HASH 0x00000009
+#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -102,6 +104,7 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
+struct crypto_ahash;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
@@ -131,6 +134,16 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+struct ahash_request {
+ struct crypto_async_request base;
+
+ unsigned int nbytes;
+ struct scatterlist *src;
+ u8 *result;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
@@ -195,6 +208,17 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
+struct ahash_alg {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ unsigned int digestsize;
+};
+
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@@ -272,6 +296,7 @@ struct compress_alg {
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
+#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
struct crypto_alg {
@@ -298,6 +323,7 @@ struct crypto_alg {
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
+ struct ahash_alg ahash;
struct compress_alg compress;
} cra_u;
@@ -383,6 +409,18 @@ struct hash_tfm {
unsigned int digestsize;
};
+struct ahash_tfm {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ unsigned int digestsize;
+ unsigned int reqsize;
+};
+
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
@@ -397,6 +435,7 @@ struct compress_tfm {
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
+#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
struct crypto_tfm {
@@ -409,6 +448,7 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
+ struct ahash_tfm ahash;
struct compress_tfm compress;
} crt_u;
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 504cb2c3fa9..2d3d1e04ba9 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -550,11 +550,11 @@ struct cyclades_icount {
struct cyclades_port {
int magic;
+ struct tty_port port;
struct cyclades_card *card;
int line;
int flags; /* defined in tty.h */
int type; /* UART type */
- struct tty_struct *tty;
int read_status_mask;
int ignore_status_mask;
int timeout;
@@ -567,13 +567,8 @@ struct cyclades_port {
int chip_rev;
int custom_divisor;
u8 x_char; /* to be pushed out ASAP */
- int close_delay;
- unsigned short closing_wait;
- int count; /* # of fd on device */
int breakon;
int breakoff;
- int blocked_open; /* # of blocked opens */
- unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
@@ -583,16 +578,14 @@ struct cyclades_port {
struct cyclades_monitor mon;
struct cyclades_idle_stats idle_stats;
struct cyclades_icount icount;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
struct completion shutdown_wait;
wait_queue_head_t delta_msr_wait;
int throttle;
};
#define CLOSING_WAIT_DELAY 30*HZ
-#define CY_CLOSING_WAIT_NONE 65535
-#define CY_CLOSING_WAIT_INF 0
+#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
+#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
#define CyMAX_CHIPS_PER_CARD 8
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d982eb89c77..98202c672fd 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -3,6 +3,7 @@
#include <asm/atomic.h>
#include <linux/list.h>
+#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index aa0737019e3..6080449fbec 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -364,8 +364,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
/* FIXME: for now we're default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
-#define DCCP_NDP_LIMIT 0xFFFFFF
-
/**
* struct dccp_minisock - Minimal DCCP connection representation
*
@@ -437,7 +435,7 @@ extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
struct sk_buff *skb);
struct dccp_options_received {
- u32 dccpor_ndp; /* only 24 bits */
+ u64 dccpor_ndp:48;
u32 dccpor_timestamp;
u32 dccpor_timestamp_echo;
u32 dccpor_elapsed_time;
@@ -533,7 +531,7 @@ struct dccp_sock {
__u16 dccps_r_ack_ratio;
__u16 dccps_pcslen;
__u16 dccps_pcrlen;
- unsigned long dccps_ndp_count;
+ __u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
struct dccp_ackvec *dccps_hc_rx_ackvec;
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7266124361b..32755cdf68d 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -26,6 +26,8 @@ struct debugfs_blob_wrapper {
unsigned long size;
};
+extern struct dentry *arch_debugfs_dir;
+
#if defined(CONFIG_DEBUG_FS)
/* declared over in file.c */
diff --git a/include/linux/device.h b/include/linux/device.h
index 6a2d04c011b..f71a78d123a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -68,6 +68,8 @@ struct bus_type {
int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev);
+ struct pm_ext_ops *pm;
+
struct bus_type_private *p;
};
@@ -131,6 +133,8 @@ struct device_driver {
int (*resume) (struct device *dev);
struct attribute_group **groups;
+ struct pm_ops *pm;
+
struct driver_private *p;
};
@@ -197,6 +201,8 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+
+ struct pm_ops *pm;
};
extern int __must_check class_register(struct class *class);
@@ -248,8 +254,11 @@ struct device_type {
struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
void (*release)(struct device *dev);
+
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+
+ struct pm_ops *pm;
};
/* interface for exporting device attributes */
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index a3750462f9e..fc82446b642 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -21,6 +21,7 @@
#define DM9000_PLATF_32BITONLY (0x0004)
#define DM9000_PLATF_EXT_PHY (0x0008)
#define DM9000_PLATF_NO_EEPROM (0x0010)
+#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */
/* platfrom data for platfrom device structure's platfrom_data field */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index ff9fbed9012..edc3dac3f02 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -358,6 +358,7 @@ typedef struct elf64_shdr {
#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c8d21635786..8bb5e87df36 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -272,6 +272,12 @@ enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
};
+struct ethtool_rxnfc {
+ __u32 cmd;
+ __u32 flow_type;
+ __u64 data;
+};
+
#ifdef __KERNEL__
struct net_device;
@@ -396,6 +402,8 @@ struct ethtool_ops {
/* the following hooks are obsolete */
int (*self_test_count)(struct net_device *);/* use get_sset_count */
int (*get_stats_count)(struct net_device *);/* use get_sset_count */
+ int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *);
+ int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *);
};
#endif /* __KERNEL__ */
@@ -442,6 +450,9 @@ struct ethtool_ops {
#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
+#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
+#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
@@ -528,4 +539,26 @@ struct ethtool_ops {
#define WAKE_MAGIC (1 << 5)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+/* L3-L4 network traffic flow types */
+#define TCP_V4_FLOW 0x01
+#define UDP_V4_FLOW 0x02
+#define SCTP_V4_FLOW 0x03
+#define AH_ESP_V4_FLOW 0x04
+#define TCP_V6_FLOW 0x05
+#define UDP_V6_FLOW 0x06
+#define SCTP_V6_FLOW 0x07
+#define AH_ESP_V6_FLOW 0x08
+
+/* L3-L4 network traffic flow hash options */
+#define RXH_DEV_PORT (1 << 0)
+#define RXH_L2DA (1 << 1)
+#define RXH_VLAN (1 << 2)
+#define RXH_L3_PROTO (1 << 3)
+#define RXH_IP_SRC (1 << 4)
+#define RXH_IP_DST (1 << 5)
+#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
+#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+#define RXH_DISCARD (1 << 31)
+
+
#endif /* _LINUX_ETHTOOL_H */
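The ethtool hunks above add ETHTOOL_GRXFH/ETHTOOL_SRXFH plus the get_rxhash/set_rxhash driver hooks and the RXH_* bits. A sketch of a driver backing those hooks with a per-flow-type table follows; the table and handler names are assumptions for illustration only:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Illustrative per-device state, indexed by flow type. */
static u64 example_rxhash[AH_ESP_V6_FLOW + 1];

static int example_get_rxhash(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	if (cmd->flow_type == 0 || cmd->flow_type > AH_ESP_V6_FLOW)
		return -EINVAL;
	cmd->data = example_rxhash[cmd->flow_type];
	return 0;
}

static int example_set_rxhash(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	if (cmd->flow_type == 0 || cmd->flow_type > AH_ESP_V6_FLOW)
		return -EINVAL;
	/* e.g. TCP/IPv4 hashed on addresses and ports:
	 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
	 */
	example_rxhash[cmd->flow_type] = cmd->data;
	return 0;
}

/* Wired up through the new ethtool_ops hooks:
 *	.get_rxhash = example_get_rxhash,
 *	.set_rxhash = example_set_rxhash,
 */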
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 6c7eff2ebad..c8ecf5b2a20 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -1,18 +1,39 @@
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
+
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/compiler.h>
+
#define FIRMWARE_NAME_MAX 30
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
struct firmware {
size_t size;
- u8 *data;
+ const u8 *data;
};
struct device;
+struct builtin_fw {
+ char *name;
+ void *data;
+ unsigned long size;
+};
+
+/* We have to play tricks here much like stringify() to get the
+ __COUNTER__ macro to be expanded as we want it */
+#define __fw_concat1(x, y) x##y
+#define __fw_concat(x, y) __fw_concat1(x, y)
+
+#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
+ DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+
+#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
+ static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
+ __used __section(.builtin_fw) = { name, blob, size }
+
#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
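DECLARE_BUILTIN_FIRMWARE() above lets a blob be linked into the kernel image and found by request_firmware() without going to userspace. A minimal sketch, with an invented firmware name and dummy payload:

#include <linux/firmware.h>

/* Illustrative blob; real built-in firmware would be generated from a file. */
static u8 example_fw_image[] = { 0x00, 0x01, 0x02, 0x03 };

DECLARE_BUILTIN_FIRMWARE("example/image.bin", example_fw_image);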
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 08934995c7a..deddeedf325 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -128,6 +128,15 @@ static inline void set_freezable(void)
}
/*
+ * Tell the freezer that the current task should be frozen by it and that it
+ * should send a fake signal to the task to freeze it.
+ */
+static inline void set_freezable_with_signal(void)
+{
+ current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
+}
+
+/*
* Freezer-friendly wrappers around wait_event_interruptible() and
* wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
*/
@@ -174,6 +183,7 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
+static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable(wq, condition) \
wait_event_interruptible(wq, condition)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d8e2762ed14..9c2ac5c0ef5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -918,12 +918,12 @@ struct file_lock {
struct list_head fl_link; /* doubly linked list of all locks */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
+ unsigned char fl_flags;
+ unsigned char fl_type;
unsigned int fl_pid;
struct pid *fl_nspid;
wait_queue_head_t fl_wait;
struct file *fl_file;
- unsigned char fl_flags;
- unsigned char fl_type;
loff_t fl_start;
loff_t fl_end;
@@ -1729,6 +1729,8 @@ static inline void invalidate_remote_inode(struct inode *inode)
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
+extern void generic_sync_sb_inodes(struct super_block *sb,
+ struct writeback_control *wbc);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
@@ -1740,6 +1742,8 @@ extern int wait_on_page_writeback_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
+extern int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
extern long do_fsync(struct file *file, int datasync);
extern void sync_supers(void);
@@ -1870,7 +1874,8 @@ extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
+ int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
new file mode 100644
index 00000000000..f368d041e02
--- /dev/null
+++ b/include/linux/ftrace.h
@@ -0,0 +1,144 @@
+#ifndef _LINUX_FTRACE_H
+#define _LINUX_FTRACE_H
+
+#ifdef CONFIG_FTRACE
+
+#include <linux/linkage.h>
+#include <linux/fs.h>
+
+extern int ftrace_enabled;
+extern int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+
+struct ftrace_ops {
+ ftrace_func_t func;
+ struct ftrace_ops *next;
+};
+
+/*
+ * The ftrace_ops must be a static and should also
+ * be read_mostly. These functions do modify read_mostly variables
+ * so use them sparely. Never free an ftrace_op or modify the
+ * next pointer after it has been registered. Even after unregistering
+ * it, the next pointer may still be used internally.
+ */
+int register_ftrace_function(struct ftrace_ops *ops);
+int unregister_ftrace_function(struct ftrace_ops *ops);
+void clear_ftrace_function(void);
+
+extern void ftrace_stub(unsigned long a0, unsigned long a1);
+
+#else /* !CONFIG_FTRACE */
+# define register_ftrace_function(ops) do { } while (0)
+# define unregister_ftrace_function(ops) do { } while (0)
+# define clear_ftrace_function(ops) do { } while (0)
+#endif /* CONFIG_FTRACE */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_HASHBITS 10
+# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS)
+
+enum {
+ FTRACE_FL_FREE = (1 << 0),
+ FTRACE_FL_FAILED = (1 << 1),
+ FTRACE_FL_FILTER = (1 << 2),
+ FTRACE_FL_ENABLED = (1 << 3),
+ FTRACE_FL_NOTRACE = (1 << 4),
+ FTRACE_FL_CONVERTED = (1 << 5),
+ FTRACE_FL_FROZEN = (1 << 6),
+};
+
+struct dyn_ftrace {
+ struct hlist_node node;
+ unsigned long ip; /* address of mcount call-site */
+ unsigned long flags;
+};
+
+int ftrace_force_update(void);
+void ftrace_set_filter(unsigned char *buf, int len, int reset);
+
+/* defined in arch */
+extern int ftrace_ip_converted(unsigned long ip);
+extern unsigned char *ftrace_nop_replace(void);
+extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
+extern int ftrace_dyn_arch_init(void *data);
+extern int ftrace_mcount_set(unsigned long *data);
+extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+ unsigned char *new_code);
+extern int ftrace_update_ftrace_func(ftrace_func_t func);
+extern void ftrace_caller(void);
+extern void ftrace_call(void);
+extern void mcount_call(void);
+
+extern int skip_trace(unsigned long ip);
+
+void ftrace_disable_daemon(void);
+void ftrace_enable_daemon(void);
+
+#else
+# define skip_trace(ip) ({ 0; })
+# define ftrace_force_update() ({ 0; })
+# define ftrace_set_filter(buf, len, reset) do { } while (0)
+# define ftrace_disable_daemon() do { } while (0)
+# define ftrace_enable_daemon() do { } while (0)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/* totally disable ftrace - can not re-enable after this */
+void ftrace_kill(void);
+void ftrace_kill_atomic(void);
+
+static inline void tracer_disable(void)
+{
+#ifdef CONFIG_FTRACE
+ ftrace_enabled = 0;
+#endif
+}
+
+#ifdef CONFIG_FRAME_POINTER
+/* TODO: need to fix this for ARM */
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+#else
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 0UL
+# define CALLER_ADDR2 0UL
+# define CALLER_ADDR3 0UL
+# define CALLER_ADDR4 0UL
+# define CALLER_ADDR5 0UL
+# define CALLER_ADDR6 0UL
+#endif
+
+#ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+ extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+# define time_hardirqs_on(a0, a1) do { } while (0)
+# define time_hardirqs_off(a0, a1) do { } while (0)
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+ extern void trace_preempt_on(unsigned long a0, unsigned long a1);
+ extern void trace_preempt_off(unsigned long a0, unsigned long a1);
+#else
+# define trace_preempt_on(a0, a1) do { } while (0)
+# define trace_preempt_off(a0, a1) do { } while (0)
+#endif
+
+#ifdef CONFIG_TRACING
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+#endif
+
+#endif /* _LINUX_FTRACE_H */
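The ftrace_ops comment in the new header spells out the lifetime rules (static, never freed, next pointer untouched). A minimal registration sketch under CONFIG_FTRACE follows; the callback and initcall names are illustrative:

#include <linux/init.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FTRACE
/* Called at every traced mcount site; keep it as cheap as possible. */
static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
}

/* Must be static and never freed, per the comment above. */
static struct ftrace_ops example_trace_ops = {
	.func	= example_trace_call,
};

static int __init example_trace_init(void)
{
	return register_ftrace_function(&example_trace_ops);
}
device_initcall(example_trace_init);
#endif /* CONFIG_FTRACE */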
diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h
index 110833666e3..4cc91393981 100644
--- a/include/linux/generic_serial.h
+++ b/include/linux/generic_serial.h
@@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/mutex.h>
+#include <linux/tty.h>
struct real_driver {
void (*disable_tx_interrupts) (void *);
@@ -33,17 +34,12 @@ struct real_driver {
struct gs_port {
int magic;
+ struct tty_port port;
unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
struct mutex port_write_mutex;
- int flags;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
- int count;
- int blocked_open;
- struct tty_struct *tty;
unsigned long event;
unsigned short closing_wait;
int close_delay;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ae7aec3cabe..e8787417f65 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -110,6 +110,14 @@ struct hd_struct {
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
#define GENHD_FL_FAIL 64
+#define BLK_SCSI_MAX_CMDS (256)
+#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+struct blk_scsi_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+ struct kobject kobj;
+};
struct gendisk {
int major; /* major number of driver */
@@ -120,6 +128,7 @@ struct gendisk {
struct hd_struct **part; /* [indexed by minor] */
struct block_device_operations *fops;
struct request_queue *queue;
+ struct blk_scsi_cmd_filter cmd_filter;
void *private_data;
sector_t capacity;
@@ -141,6 +150,9 @@ struct gendisk {
struct disk_stats dkstats;
#endif
struct work_struct async_notify;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+#endif
};
/*
diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h
index 2177ee5b2fe..940aeb51d53 100644
--- a/include/linux/hayesesp.h
+++ b/include/linux/hayesesp.h
@@ -76,11 +76,10 @@ struct hayes_esp_config {
struct esp_struct {
int magic;
+ struct tty_port port;
spinlock_t lock;
- int port;
+ int io_port;
int irq;
- int flags; /* defined in tty.h */
- struct tty_struct *tty;
int read_status_mask;
int ignore_status_mask;
int timeout;
@@ -93,14 +92,10 @@ struct esp_struct {
int MCR; /* Modem control register */
unsigned long last_active;
int line;
- int count; /* # of fd on device */
- int blocked_open; /* # of blocked opens */
unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
wait_queue_head_t delta_msr_wait;
wait_queue_head_t break_wait;
struct async_icount icount; /* kernel counters for the 4 input interrupts */
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 6115545a5b9..c59769693be 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -45,7 +45,6 @@ struct hdlc_proto {
/* Pointed to by dev->priv */
typedef struct hdlc_device {
- struct net_device_stats stats;
/* used by HDLC layer to take control over HDLC device from hw driver*/
int (*attach)(struct net_device *dev,
unsigned short encoding, unsigned short parity);
@@ -109,12 +108,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
/* May be used by hardware driver to gain control over HDLC device */
void detach_hdlc_protocol(struct net_device *dev);
-static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
-{
- return &dev_to_hdlc(dev)->stats;
-}
-
-
static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
struct net_device *dev)
{
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 77afbb60fd1..0177d280f73 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -33,9 +33,11 @@ struct i2c_algo_pcf_data {
int (*getclock) (void *data);
void (*waitforpin) (void);
- /* local settings */
- int udelay;
- int timeout;
+ /* Multi-master lost arbitration back-off delay (msecs)
+ * This should be set by the bus adapter or knowledgable client
+ * if bus is multi-mastered, else zero
+ */
+ unsigned long lab_mdelay;
};
int i2c_pcf_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 580acc93903..4862398e05b 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -33,15 +33,11 @@
#define I2C_DRIVERID_MSP3400 1
#define I2C_DRIVERID_TUNER 2
-#define I2C_DRIVERID_TDA8425 4 /* stereo sound processor */
#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
-#define I2C_DRIVERID_TEA6300 18 /* audio mixer */
-#define I2C_DRIVERID_TDA9850 20 /* audio mixer */
-#define I2C_DRIVERID_TDA9855 21 /* audio mixer */
#define I2C_DRIVERID_SAA7110 22 /* video decoder */
#define I2C_DRIVERID_MGATVO 23 /* Matrox TVOut */
#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
@@ -50,9 +46,7 @@
#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
#define I2C_DRIVERID_TVMIXER 28 /* Mixer driver for tv cards */
#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
-#define I2C_DRIVERID_TDA9873 31 /* TV sound decoder chip */
#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
-#define I2C_DRIVERID_PIC16C54_PV9 33 /* Audio mux/ir receiver */
#define I2C_DRIVERID_BT819 40 /* video decoder */
#define I2C_DRIVERID_BT856 41 /* video encoder */
#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
@@ -63,7 +57,6 @@
#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
#define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */
-#define I2C_DRIVERID_TDA9874 66 /* TV sound decoder */
#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
@@ -91,8 +84,6 @@
#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
-#define I2C_DRIVERID_I2CDEV 900
-
#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
/*
@@ -111,7 +102,6 @@
#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */
#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */
-#define I2C_HW_B_S3VIA 0x010018 /* S3Via ProSavage adapter */
#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */
#define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */
#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */
@@ -161,7 +151,6 @@
#define I2C_HW_SMBUS_W9968CF 0x04000d
#define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */
#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */
-#define I2C_HW_SMBUS_OVFX2 0x040011 /* Cypress/OmniVision FX2 webcam */
#define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */
#define I2C_HW_SMBUS_ALI1563 0x040013
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 8dc73013219..08be0d21864 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -35,6 +35,8 @@
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+extern struct bus_type i2c_bus_type;
+
/* --- General options ------------------------------------------------ */
struct i2c_msg;
@@ -43,6 +45,7 @@ struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
union i2c_smbus_data;
+struct i2c_board_info;
/*
* The master routines are the ones normally used to transmit data to devices
@@ -69,9 +72,8 @@ extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
union i2c_smbus_data * data);
/* Now follow the 'nice' access routines. These also document the calling
- conventions of smbus_access. */
+ conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte(struct i2c_client * client);
extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
@@ -93,15 +95,33 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
u8 command, u8 length,
const u8 *values);
-/*
- * A driver is capable of handling one or more physical devices present on
- * I2C adapters. This information is used to inform the driver of adapter
- * events.
+/**
+ * struct i2c_driver - represent an I2C device driver
+ * @class: What kind of i2c device we instantiate (for detect)
+ * @detect: Callback for device detection
+ * @address_data: The I2C addresses to probe, ignore or force (for detect)
+ * @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
+ *
+ * For automatic device detection, both @detect and @address_data must
+ * be defined. @class should also be set, otherwise only devices forced
+ * with module parameters will be created. The detect function must
+ * fill at least the name field of the i2c_board_info structure it is
+ * handed upon successful detection, and possibly also the flags field.
+ *
+ * If @detect is missing, the driver will still work fine for enumerated
+ * devices. Detected devices simply won't be supported. This is expected
+ * for the many I2C/SMBus devices which can't be detected reliably, and
+ * the ones which can always be enumerated in practice.
+ *
+ * The i2c_client structure which is handed to the @detect callback is
+ * not a real i2c_client. It is initialized just enough so that you can
+ * call i2c_smbus_read_byte_data and friends on it. Don't do anything
+ * else with it. In particular, calling dev_dbg and friends on it is
+ * not allowed.
*/
-
struct i2c_driver {
int id;
unsigned int class;
@@ -141,6 +161,11 @@ struct i2c_driver {
struct device_driver driver;
const struct i2c_device_id *id_table;
+
+ /* Device detection callback for automatic device creation */
+ int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
+ const struct i2c_client_address_data *address_data;
+ struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -156,6 +181,7 @@ struct i2c_driver {
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @list: list of active/busy clients (DEPRECATED)
+ * @detected: member of an i2c_driver.clients list
* @released: used to synchronize client releases & detaches and references
*
* An i2c_client identifies a single device (i.e. chip) connected to an
@@ -173,6 +199,7 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head list; /* DEPRECATED */
+ struct list_head detected;
struct completion released;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -350,10 +377,11 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_TV_ANALOG (1<<1) /* bttv + friends */
#define I2C_CLASS_TV_DIGITAL (1<<2) /* dvb cards */
-#define I2C_CLASS_DDC (1<<3) /* i2c-matroxfb ? */
+#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_CAM_ANALOG (1<<4) /* camera with analog CCD */
#define I2C_CLASS_CAM_DIGITAL (1<<5) /* most webcams */
#define I2C_CLASS_SOUND (1<<6) /* sound devices */
+#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
#define I2C_CLASS_ALL (UINT_MAX) /* all of the above */
/* i2c_client_address_data is the struct for holding default client
@@ -537,7 +565,7 @@ union i2c_smbus_data {
/* and one more for user-space compatibility */
};
-/* smbus_access read or write markers */
+/* i2c_smbus_xfer read or write markers */
#define I2C_SMBUS_READ 1
#define I2C_SMBUS_WRITE 0
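The new i2c_driver fields documented above (@class, @detect, @address_data) enable automatic device detection. A hedged sketch of a detect callback and driver declaration follows; the chip name, class and probed register are assumptions, not taken from the patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/i2c.h>

/* Detection callback: poke the chip and fill in a name on success. */
static int example_detect(struct i2c_client *client, int kind,
			  struct i2c_board_info *info)
{
	/* Only i2c_smbus_*() calls are valid on the temporary client. */
	if (i2c_smbus_read_byte_data(client, 0x00) < 0)
		return -ENODEV;

	strlcpy(info->type, "example_chip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "example_chip",
	},
	.class		= I2C_CLASS_HWMON,
	.detect		= example_detect,
	/* .address_data lists the addresses to probe, ignore or force */
};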
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h
new file mode 100644
index 00000000000..f6edd522a92
--- /dev/null
+++ b/include/linux/i2c/at24.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_AT24_H
+#define _LINUX_AT24_H
+
+#include <linux/types.h>
+
+/*
+ * As seen through Linux I2C, differences between the most common types of I2C
+ * memory include:
+ * - How much memory is available (usually specified in bit)?
+ * - What write page size does it support?
+ * - Special flags (16 bit addresses, read_only, world readable...)?
+ *
+ * If you set up a custom eeprom type, please double-check the parameters.
+ * Especially page_size needs extra care, as you risk data loss if your value
+ * is bigger than what the chip actually supports!
+ */
+
+struct at24_platform_data {
+ u32 byte_len; /* size (sum of all addr) */
+ u16 page_size; /* for writes */
+ u8 flags;
+#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */
+#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
+#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
+#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
+};
+
+#endif /* _LINUX_AT24_H */
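at24_platform_data above describes EEPROM geometry to the driver. A sketch of board code attaching it through i2c_board_info follows; the device name, address and geometry are illustrative values for a 24C02-class part, not taken from the patch:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>

/* A 24C02-class part: 2 kbit = 256 bytes, 8-byte write pages, world readable. */
static struct at24_platform_data example_at24_data = {
	.byte_len	= 256,
	.page_size	= 8,
	.flags		= AT24_FLAG_IRUGO,
};

static struct i2c_board_info example_eeprom_info __initdata = {
	I2C_BOARD_INFO("24c02", 0x50),
	.platform_data	= &example_at24_data,
};

/* Registered from board setup code, e.g.:
 *	i2c_register_board_info(0, &example_eeprom_info, 1);
 */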
diff --git a/include/linux/ide.h b/include/linux/ide.h
index eddb6daadf4..4726126f5a5 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -139,6 +139,12 @@ struct ide_io_ports {
#define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */
/*
+ * Op codes for special requests to be handled by ide_special_rq().
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+#define REQ_DRIVE_RESET 0x20
+
+/*
* Check for an interrupt and acknowledge the interrupt status
*/
struct hwif_s;
@@ -171,7 +177,7 @@ typedef struct hw_regs_s {
int irq; /* our irq number */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
hwif_chipset_t chipset;
- struct device *dev;
+ struct device *dev, *parent;
} hw_regs_t;
void ide_init_port_data(struct hwif_s *, unsigned int);
@@ -364,7 +370,6 @@ typedef struct ide_drive_s {
u8 wcache; /* status of write cache */
u8 acoustic; /* acoustic management */
u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ctl; /* "normal" value for Control register */
u8 ready_stat; /* min status value for drive ready */
u8 mult_count; /* current multiple sector setting */
u8 mult_req; /* requested multiple sector setting */
@@ -406,8 +411,8 @@ typedef struct ide_drive_s {
struct ide_port_info;
struct ide_port_ops {
- /* host specific initialization of devices on a port */
- void (*port_init_devs)(struct hwif_s *);
+ /* host specific initialization of a device */
+ void (*init_dev)(ide_drive_t *);
/* routine to program host for PIO mode */
void (*set_pio_mode)(ide_drive_t *, const u8);
/* routine to program host for DMA mode */
@@ -493,7 +498,7 @@ typedef struct hwif_s {
void (*ide_dma_clear_irq)(ide_drive_t *drive);
void (*OUTB)(u8 addr, unsigned long port);
- void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
+ void (*OUTBSYNC)(struct hwif_s *hwif, u8 addr, unsigned long port);
u8 (*INB)(unsigned long port);
@@ -532,7 +537,6 @@ typedef struct hwif_s {
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
- unsigned mmio : 1; /* host uses MMIO */
struct device gendev;
struct device *portdev;
@@ -567,8 +571,6 @@ typedef struct hwgroup_s {
unsigned int sleeping : 1;
/* BOOL: polling active & poll_timeout field valid */
unsigned int polling : 1;
- /* BOOL: in a polling reset situation. Must not trigger another reset yet */
- unsigned int resetting : 1;
/* current drive */
ide_drive_t *drive;
@@ -604,12 +606,13 @@ enum {
PC_FLAG_SUPPRESS_ERROR = (1 << 1),
PC_FLAG_WAIT_FOR_DSC = (1 << 2),
PC_FLAG_DMA_OK = (1 << 3),
- PC_FLAG_DMA_RECOMMENDED = (1 << 4),
- PC_FLAG_DMA_IN_PROGRESS = (1 << 5),
- PC_FLAG_DMA_ERROR = (1 << 6),
- PC_FLAG_WRITING = (1 << 7),
+ PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
+ PC_FLAG_DMA_ERROR = (1 << 5),
+ PC_FLAG_WRITING = (1 << 6),
/* command timed out */
- PC_FLAG_TIMEDOUT = (1 << 8),
+ PC_FLAG_TIMEDOUT = (1 << 7),
+ PC_FLAG_ZIP_DRIVE = (1 << 8),
+ PC_FLAG_DRQ_INTERRUPT = (1 << 9),
};
struct ide_atapi_pc {
@@ -642,8 +645,8 @@ struct ide_atapi_pc {
* to change/removal later.
*/
u8 pc_buf[256];
- void (*idefloppy_callback) (ide_drive_t *);
- ide_startstop_t (*idetape_callback) (ide_drive_t *);
+
+ void (*callback)(ide_drive_t *);
/* idetape only */
struct idetape_bh *bh;
@@ -787,7 +790,6 @@ struct ide_driver_s {
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
int (*end_request)(ide_drive_t *, int, int);
ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
- ide_startstop_t (*abort)(ide_drive_t *, struct request *rq);
struct device_driver gen_driver;
int (*probe)(ide_drive_t *);
void (*remove)(ide_drive_t *);
@@ -802,22 +804,6 @@ struct ide_driver_s {
int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
-/*
- * ide_hwifs[] is the master data structure used to keep track
- * of just about everything in ide.c. Whenever possible, routines
- * should be using pointers to a drive (ide_drive_t *) or
- * pointers to a hwif (ide_hwif_t *), rather than indexing this
- * structure directly (the allocation/layout may change!).
- *
- */
-#ifndef _IDE_C
-extern ide_hwif_t ide_hwifs[]; /* master data repository */
-#endif
-extern int ide_noacpi;
-extern int ide_acpigtf;
-extern int ide_acpionboot;
-extern int noautodma;
-
extern int ide_vlb_clk;
extern int ide_pci_clk;
@@ -845,10 +831,6 @@ ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
-ide_startstop_t __ide_abort(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_abort(ide_drive_t *, const char *);
-
extern void ide_fix_driveid(struct hd_driveid *);
extern void ide_fixstring(u8 *, const int, const int);
@@ -857,25 +839,12 @@ int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
extern ide_startstop_t ide_do_reset (ide_drive_t *);
-extern void ide_init_drive_cmd (struct request *rq);
-
-/*
- * "action" parameter type for ide_do_drive_cmd() below.
- */
-typedef enum {
- ide_wait, /* insert rq at end of list, and wait for it */
- ide_preempt, /* insert rq in front of current request */
- ide_head_wait, /* insert rq in front of current request and wait for it */
- ide_end /* insert rq at end of list, but don't wait for it */
-} ide_action_t;
-
-extern int ide_do_drive_cmd(ide_drive_t *, struct request *, ide_action_t);
+extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
enum {
IDE_TFLAG_LBA48 = (1 << 0),
- IDE_TFLAG_NO_SELECT_MASK = (1 << 1),
IDE_TFLAG_FLAGGED = (1 << 2),
IDE_TFLAG_OUT_DATA = (1 << 3),
IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
@@ -980,11 +949,23 @@ typedef struct ide_task_s {
void ide_tf_dump(const char *, struct ide_taskfile *);
extern void SELECT_DRIVE(ide_drive_t *);
+void SELECT_MASK(ide_drive_t *, int);
extern int drive_is_ready(ide_drive_t *);
void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
+ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
+ ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
+ void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
+ void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
+ void (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
+ int));
+ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
+ ide_handler_t *, unsigned int, ide_expiry_t *);
+ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
+ ide_handler_t *, unsigned int, ide_expiry_t *);
+
ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
void task_end_request(ide_drive_t *, struct request *, u8);
@@ -996,8 +977,6 @@ int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long);
int ide_task_ioctl(ide_drive_t *, unsigned int, unsigned long);
-extern int system_bus_clock(void);
-
extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
extern u8 eighty_ninty_three (ide_drive_t *);
@@ -1279,16 +1258,43 @@ static inline int ide_dev_is_sata(struct hd_driveid *id)
u64 ide_get_lba_addr(struct ide_taskfile *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
-typedef struct ide_pio_timings_s {
- int setup_time; /* Address setup (ns) minimum */
- int active_time; /* Active pulse (ns) minimum */
- int cycle_time; /* Cycle time (ns) minimum = */
- /* active + recovery (+ setup for some chips) */
-} ide_pio_timings_t;
+struct ide_timing {
+ u8 mode;
+ u8 setup; /* t1 */
+ u16 act8b; /* t2 for 8-bit io */
+ u16 rec8b; /* t2i for 8-bit io */
+ u16 cyc8b; /* t0 for 8-bit io */
+ u16 active; /* t2 or tD */
+ u16 recover; /* t2i or tK */
+ u16 cycle; /* t0 */
+ u16 udma; /* t2CYCTYP/2 */
+};
+
+enum {
+ IDE_TIMING_SETUP = (1 << 0),
+ IDE_TIMING_ACT8B = (1 << 1),
+ IDE_TIMING_REC8B = (1 << 2),
+ IDE_TIMING_CYC8B = (1 << 3),
+ IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
+ IDE_TIMING_CYC8B,
+ IDE_TIMING_ACTIVE = (1 << 4),
+ IDE_TIMING_RECOVER = (1 << 5),
+ IDE_TIMING_CYCLE = (1 << 6),
+ IDE_TIMING_UDMA = (1 << 7),
+ IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
+ IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
+ IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
+};
+
+struct ide_timing *ide_timing_find_mode(u8);
+u16 ide_pio_cycle_time(ide_drive_t *, u8);
+void ide_timing_merge(struct ide_timing *, struct ide_timing *,
+ struct ide_timing *, unsigned int);
+int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+
+int ide_scan_pio_blacklist(char *);
-unsigned int ide_pio_cycle_time(ide_drive_t *, u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
-extern const ide_pio_timings_t ide_pio_timings[6];
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
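
A rough sketch of how a host driver would consume the new timing helpers; the clock periods and the final register programming are placeholders, and only the ide_timing_*/IDE_TIMING_* names come from this header:

static void example_program_timing(ide_drive_t *drive, u8 speed)
{
        struct ide_timing t;
        int T = 30, UT = 15;    /* assumed controller clock periods, in ns */

        if (ide_timing_compute(drive, speed, &t, T, UT))
                return; /* mode not supported for this drive */

        /*
         * Program the chipset from t.setup, t.active, t.recover (and
         * t.udma for UDMA modes); the register layout is host specific.
         */
}

When two devices share a cable, ide_timing_merge() combines their computed timings under one of the IDE_TIMING_* masks so the shared signal timing satisfies both.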
@@ -1349,7 +1355,8 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
{
ide_hwif_t *hwif = drive->hwif;
- hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
+ hwif->OUTBSYNC(hwif, ATA_DEVCTL_OBS | (on ? 0 : 2),
+ hwif->io_ports.ctl_addr);
}
static inline u8 ide_read_status(ide_drive_t *drive)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0b5e03eae6d..a1630ba0b87 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -98,6 +98,9 @@
#define IEEE80211_MAX_SSID_LEN 32
#define IEEE80211_MAX_MESH_ID_LEN 32
+#define IEEE80211_QOS_CTL_LEN 2
+#define IEEE80211_QOS_CTL_TID_MASK 0x000F
+#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
struct ieee80211_hdr {
__le16 frame_control;
@@ -109,6 +112,355 @@ struct ieee80211_hdr {
u8 addr4[6];
} __attribute__ ((packed));
+/**
+ * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_tods(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0;
+}
+
+/**
+ * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_fromds(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0;
+}
+
+/**
+ * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_a4(__le16 fc)
+{
+ __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
+ return (fc & tmp) == tmp;
+}
+
+/**
+ * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_morefrags(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0;
+}
+
+/**
+ * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_retry(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0;
+}
+
+/**
+ * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_pm(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0;
+}
+
+/**
+ * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_moredata(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0;
+}
+
+/**
+ * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_protected(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0;
+}
+
+/**
+ * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_order(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0;
+}
+
+/**
+ * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_mgmt(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT);
+}
+
+/**
+ * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_ctl(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL);
+}
+
+/**
+ * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA);
+}
+
+/**
+ * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data_qos(__le16 fc)
+{
+ /*
+ * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need
+ * to check the one bit
+ */
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA);
+}
+
+/**
+ * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data_present(__le16 fc)
+{
+ /*
+ * mask with 0x40 and test that that bit is clear to only return true
+ * for the data-containing subtypes.
+ */
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA);
+}
+
+/**
+ * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_assoc_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
+}
+
+/**
+ * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_assoc_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP);
+}
+
+/**
+ * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_reassoc_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ);
+}
+
+/**
+ * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_reassoc_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP);
+}
+
+/**
+ * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_probe_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ);
+}
+
+/**
+ * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_probe_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
+}
+
+/**
+ * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
+}
+
+/**
+ * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_atim(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM);
+}
+
+/**
+ * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_disassoc(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC);
+}
+
+/**
+ * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_auth(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
+}
+
+/**
+ * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_deauth(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
+}
+
+/**
+ * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_action(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
+}
+
+/**
+ * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_back_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/**
+ * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_back(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK);
+}
+
+/**
+ * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_pspoll(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+}
+
+/**
+ * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_rts(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+}
+
+/**
+ * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cts(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+}
+
+/**
+ * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_ack(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);
+}
+
+/**
+ * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cfend(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND);
+}
+
+/**
+ * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cfendack(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK);
+}
+
+/**
+ * ieee80211_is_nullfunc - check if FTYPE=IEEE80211_FTYPE_DATA and STYPE=IEEE80211_STYPE_NULLFUNC
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_nullfunc(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
+}
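
As a quick illustration (not part of the patch), receive paths can now classify frames without open-coding frame-control masks; this sketch uses only the helpers defined above:

static const char *example_classify_frame(__le16 fc)
{
        if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc))
                return "scan result";
        if (ieee80211_is_data_qos(fc))
                return "QoS data";
        if (ieee80211_is_data(fc))
                return "data";
        if (ieee80211_is_ctl(fc))
                return "control";
        return "other";
}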
struct ieee80211s_hdr {
u8 flags;
@@ -119,6 +471,40 @@ struct ieee80211s_hdr {
u8 eaddr3[6];
} __attribute__ ((packed));
+/**
+ * struct ieee80211_quiet_ie
+ *
+ * This structure refers to "Quiet information element"
+ */
+struct ieee80211_quiet_ie {
+ u8 count;
+ u8 period;
+ __le16 duration;
+ __le16 offset;
+} __attribute__ ((packed));
+
+/**
+ * struct ieee80211_msrment_ie
+ *
+ * This structure refers to "Measurement Request/Report information element"
+ */
+struct ieee80211_msrment_ie {
+ u8 token;
+ u8 mode;
+ u8 type;
+ u8 request[0];
+} __attribute__ ((packed));
+
+/**
+ * struct ieee80211_channel_sw_ie
+ *
+ * This structure refers to "Channel Switch Announcement information element"
+ */
+struct ieee80211_channel_sw_ie {
+ u8 mode;
+ u8 new_ch_num;
+ u8 count;
+} __attribute__ ((packed));
struct ieee80211_mgmt {
__le16 frame_control;
@@ -194,13 +580,18 @@ struct ieee80211_mgmt {
u8 action_code;
u8 element_id;
u8 length;
- u8 switch_mode;
- u8 new_chan;
- u8 switch_count;
+ struct ieee80211_channel_sw_ie sw_elem;
} __attribute__((packed)) chan_switch;
struct{
u8 action_code;
u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ struct ieee80211_msrment_ie msr_elem;
+ } __attribute__((packed)) measurement;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
__le16 capab;
__le16 timeout;
__le16 start_seq_num;
@@ -269,6 +660,10 @@ struct ieee80211_bar {
__le16 start_seq_num;
} __attribute__((packed));
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+
/**
* struct ieee80211_ht_cap - HT capabilities
*
@@ -306,20 +701,33 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_CAP_SGI_40 0x0040
#define IEEE80211_HT_CAP_DELAY_BA 0x0400
#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+/* 802.11n HT capability AMPDU settings */
#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
+/* 802.11n HT capability MCS set */
+#define IEEE80211_SUPP_MCS_SET_UEQM 4
+#define IEEE80211_HT_CAP_MAX_STREAMS 4
+#define IEEE80211_SUPP_MCS_SET_LEN 10
+/* maximum streams the spec allows */
+#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
+#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
+#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
/* 802.11n HT IE masks */
#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
#define IEEE80211_HT_IE_CHA_WIDTH 0x04
#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
/* MIMO Power Save Modes */
-#define WLAN_HT_CAP_MIMO_PS_STATIC 0
-#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
-#define WLAN_HT_CAP_MIMO_PS_INVALID 2
-#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
+#define WLAN_HT_CAP_MIMO_PS_STATIC 0
+#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1
+#define WLAN_HT_CAP_MIMO_PS_INVALID 2
+#define WLAN_HT_CAP_MIMO_PS_DISABLED 3
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
@@ -337,11 +745,21 @@ struct ieee80211_ht_addt_info {
#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
#define WLAN_CAPABILITY_PBCC (1<<6)
#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+
/* 802.11h */
#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
#define WLAN_CAPABILITY_QOS (1<<9)
#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
+/* measurement */
+#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0)
+#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1)
+#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2)
+
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
+
/* 802.11g ERP information element */
#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
@@ -512,6 +930,15 @@ enum ieee80211_category {
WLAN_CATEGORY_WMM = 17,
};
+/* SPECTRUM_MGMT action code */
+enum ieee80211_spectrum_mgmt_actioncode {
+ WLAN_ACTION_SPCT_MSR_REQ = 0,
+ WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ WLAN_ACTION_SPCT_TPC_REQ = 2,
+ WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+};
+
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -540,63 +967,57 @@ enum ieee80211_back_parties {
#define WLAN_MAX_KEY_LEN 32
/**
+ * ieee80211_get_qos_ctl - get pointer to qos control bytes
+ * @hdr: the frame
+ *
+ * The qos ctrl bytes come after the frame_control, duration, seq_num
+ * and 3 or 4 addresses of length ETH_ALEN.
+ * 3 addr: 2 + 2 + 2 + 3*6 = 24
+ * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ */
+static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_has_a4(hdr->frame_control))
+ return (u8 *)hdr + 30;
+ else
+ return (u8 *)hdr + 24;
+}
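
A small usage sketch combining this accessor with the new QoS masks; the wrapper name is invented:

static inline u8 example_frame_tid(struct ieee80211_hdr *hdr)
{
        u8 *qc;

        if (!ieee80211_is_data_qos(hdr->frame_control))
                return 0;       /* non-QoS frames carry no TID */

        qc = ieee80211_get_qos_ctl(hdr);
        return qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}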
+
+/**
* ieee80211_get_SA - get pointer to SA
+ * @hdr: the frame
*
* Given an 802.11 frame, this function returns the offset
* to the source address (SA). It does not verify that the
* header is long enough to contain the address, and the
* header must be long enough to contain the frame control
* field.
- *
- * @hdr: the frame
*/
static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
{
- u8 *raw = (u8 *) hdr;
- u8 tofrom = (*(raw+1)) & 3; /* get the TODS and FROMDS bits */
-
- switch (tofrom) {
- case 2:
- return hdr->addr3;
- case 3:
- return hdr->addr4;
- }
+ if (ieee80211_has_a4(hdr->frame_control))
+ return hdr->addr4;
+ if (ieee80211_has_fromds(hdr->frame_control))
+ return hdr->addr3;
return hdr->addr2;
}
/**
* ieee80211_get_DA - get pointer to DA
+ * @hdr: the frame
*
* Given an 802.11 frame, this function returns the offset
* to the destination address (DA). It does not verify that
* the header is long enough to contain the address, and the
* header must be long enough to contain the frame control
* field.
- *
- * @hdr: the frame
*/
static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
{
- u8 *raw = (u8 *) hdr;
- u8 to_ds = (*(raw+1)) & 1; /* get the TODS bit */
-
- if (to_ds)
+ if (ieee80211_has_tods(hdr->frame_control))
return hdr->addr3;
- return hdr->addr1;
-}
-
-/**
- * ieee80211_get_morefrag - determine whether the MOREFRAGS bit is set
- *
- * This function determines whether the "more fragments" bit is set
- * in the frame.
- *
- * @hdr: the frame
- */
-static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr)
-{
- return (le16_to_cpu(hdr->frame_control) &
- IEEE80211_FCTL_MOREFRAGS) != 0;
+ else
+ return hdr->addr1;
}
#endif /* IEEE80211_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 950e13d09e0..6badb3e2c4e 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -4,8 +4,6 @@
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
- * $Id: if_bridge.h,v 1.1 2000/02/18 16:47:01 davem Exp $
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index ad09609227f..18db0668065 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -43,6 +43,9 @@ struct sockaddr_ll
#define PACKET_COPY_THRESH 7
#define PACKET_AUXDATA 8
#define PACKET_ORIGDEV 9
+#define PACKET_VERSION 10
+#define PACKET_HDRLEN 11
+#define PACKET_RESERVE 12
struct tpacket_stats
{
@@ -57,6 +60,7 @@ struct tpacket_auxdata
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
+ __u16 tp_vlan_tci;
};
struct tpacket_hdr
@@ -79,6 +83,26 @@ struct tpacket_hdr
#define TPACKET_ALIGN(x) (((x)+TPACKET_ALIGNMENT-1)&~(TPACKET_ALIGNMENT-1))
#define TPACKET_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket_hdr)) + sizeof(struct sockaddr_ll))
+struct tpacket2_hdr
+{
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+ __u32 tp_sec;
+ __u32 tp_nsec;
+ __u16 tp_vlan_tci;
+};
+
+#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
+
+enum tpacket_versions
+{
+ TPACKET_V1,
+ TPACKET_V2,
+};
+
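For context, a user-space sketch of how an application would negotiate the new ring format on a packet socket (error handling trimmed; not part of the kernel change):

#include <sys/socket.h>
#include <linux/if_packet.h>

static int example_use_tpacket_v2(int fd)
{
        int val = TPACKET_V2;
        socklen_t len = sizeof(val);

        /* PACKET_HDRLEN reports the header size for the requested version */
        if (getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len) < 0)
                return -1;      /* older kernel: stay on TPACKET_V1 */

        val = TPACKET_V2;
        return setsockopt(fd, SOL_PACKET, PACKET_VERSION, &val, sizeof(val));
}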
/*
Frame structure:
diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h
index 0f2f70d4e48..c3b1f856270 100644
--- a/include/linux/if_ppp.h
+++ b/include/linux/if_ppp.h
@@ -1,5 +1,3 @@
-/* $Id: if_ppp.h,v 1.21 2000/03/27 06:03:36 paulus Exp $ */
-
/*
* if_ppp.h - Point-to-Point Protocol definitions.
*
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 8c71fe2fb1f..4c6307ad9fd 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -11,14 +11,13 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * $Id: if_tun.h,v 1.2 2001/06/01 18:39:47 davem Exp $
*/
#ifndef __IF_TUN_H
#define __IF_TUN_H
#include <linux/types.h>
+#include <linux/if_ether.h>
/* Read queue size */
#define TUN_READQ_SIZE 500
@@ -33,6 +32,7 @@
#define TUN_NO_PI 0x0040
#define TUN_ONE_QUEUE 0x0080
#define TUN_PERSIST 0x0100
+#define TUN_VNET_HDR 0x0200
/* Ioctl defines */
#define TUNSETNOCSUM _IOW('T', 200, int)
@@ -42,17 +42,43 @@
#define TUNSETOWNER _IOW('T', 204, int)
#define TUNSETLINK _IOW('T', 205, int)
#define TUNSETGROUP _IOW('T', 206, int)
+#define TUNGETFEATURES _IOR('T', 207, unsigned int)
+#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
+#define TUNSETTXFILTER _IOW('T', 209, unsigned int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
#define IFF_TAP 0x0002
#define IFF_NO_PI 0x1000
#define IFF_ONE_QUEUE 0x2000
+#define IFF_VNET_HDR 0x4000
+
+/* Features for GSO (TUNSETOFFLOAD). */
+#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
+#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */
+#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */
+#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */
+/* Protocol info prepended to the packets (when IFF_NO_PI is not set) */
+#define TUN_PKT_STRIP 0x0001
struct tun_pi {
- unsigned short flags;
+ __u16 flags;
__be16 proto;
};
-#define TUN_PKT_STRIP 0x0001
+
+/*
+ * Filter spec (used for SETXXFILTER ioctls)
+ * This stuff is applicable only to the TAP (Ethernet) devices.
+ * If the count is zero the filter is disabled and the driver accepts
+ * all packets (promisc mode).
+ * If the filter is enabled, then in order to accept broadcast packets the
+ * broadcast addr must be explicitly included in the addr list.
+ */
+#define TUN_FLT_ALLMULTI 0x0001 /* Accept all multicast packets */
+struct tun_filter {
+ __u16 flags; /* TUN_FLT_ flags see above */
+ __u16 count; /* Number of addresses */
+ __u8 addr[0][ETH_ALEN];
+};
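
A user-space sketch of the new tap options; the device name and the offload set are arbitrary and error paths are shortened:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_open_tap(void)
{
        struct ifreq ifr;
        unsigned int offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
        strncpy(ifr.ifr_name, "tap0", sizeof(ifr.ifr_name) - 1);

        if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
            ioctl(fd, TUNSETOFFLOAD, offload) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}

With IFF_VNET_HDR set, each frame read from or written to the fd is preceded by a virtio-net style header describing checksum and segmentation state.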
#endif /* __IF_TUN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 15ace02b7b2..9e7b49b8062 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -14,10 +14,6 @@
#define _LINUX_IF_VLAN_H_
#ifdef __KERNEL__
-
-/* externally defined structs */
-struct hlist_node;
-
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -91,7 +87,7 @@ struct vlan_group {
};
static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
- unsigned int vlan_id)
+ u16 vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
@@ -99,7 +95,7 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
}
static inline void vlan_group_set_device(struct vlan_group *vg,
- unsigned int vlan_id,
+ u16 vlan_id,
struct net_device *dev)
{
struct net_device **array;
@@ -109,164 +105,81 @@ static inline void vlan_group_set_device(struct vlan_group *vg,
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
-struct vlan_priority_tci_mapping {
- u32 priority;
- unsigned short vlan_qos; /* This should be shifted when first set, so we only do it
- * at provisioning time.
- * ((skb->priority << 13) & 0xE000)
- */
- struct vlan_priority_tci_mapping *next;
-};
+#define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci)
+#define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci)
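
A short sketch of the consumer side under the new scheme; the function name is hypothetical, only the two macros above are from this header:

static u16 example_tx_vlan_tci(const struct sk_buff *skb)
{
        if (vlan_tx_tag_present(skb))
                return vlan_tx_tag_get(skb);    /* hand to the NIC's tag field */
        return 0;                               /* untagged frame */
}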
-/* Holds information that makes sense if this device is a VLAN device. */
-struct vlan_dev_info {
- /** This will be the mapping that correlates skb->priority to
- * 3 bits of VLAN QOS tags...
- */
- unsigned int nr_ingress_mappings;
- u32 ingress_priority_map[8];
-
- unsigned int nr_egress_mappings;
- struct vlan_priority_tci_mapping *egress_priority_map[16]; /* hash table */
-
- unsigned short vlan_id; /* The VLAN Identifier for this interface. */
- unsigned short flags; /* (1 << 0) re_order_header This option will cause the
- * VLAN code to move around the ethernet header on
- * ingress to make the skb look **exactly** like it
- * came in from an ethernet port. This destroys some of
- * the VLAN information in the skb, but it fixes programs
- * like DHCP that use packet-filtering and don't understand
- * 802.1Q
- */
- struct net_device *real_dev; /* the underlying device/interface */
- unsigned char real_dev_addr[ETH_ALEN];
- struct proc_dir_entry *dent; /* Holds the proc data */
- unsigned long cnt_inc_headroom_on_tx; /* How many times did we have to grow the skb on TX. */
- unsigned long cnt_encap_on_xmit; /* How many times did we have to encapsulate the skb on TX. */
-};
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern u16 vlan_dev_vlan_id(const struct net_device *dev);
-static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
+extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+ u16 vlan_tci, int polling);
+#else
+static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return netdev_priv(dev);
+ BUG();
+ return NULL;
}
-/* inline functions */
-static inline __u32 vlan_get_ingress_priority(struct net_device *dev,
- unsigned short vlan_tag)
+static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- struct vlan_dev_info *vip = vlan_dev_info(dev);
-
- return vip->ingress_priority_map[(vlan_tag >> 13) & 0x7];
+ BUG();
+ return 0;
}
-/* VLAN tx hw acceleration helpers. */
-struct vlan_skb_tx_cookie {
- u32 magic;
- u32 vlan_tag;
-};
-
-#define VLAN_TX_COOKIE_MAGIC 0x564c414e /* "VLAN" in ascii. */
-#define VLAN_TX_SKB_CB(__skb) ((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
-#define vlan_tx_tag_present(__skb) \
- (VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
-#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
-
-/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
-static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
- struct vlan_group *grp,
- unsigned short vlan_tag, int polling)
+static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
+ u16 vlan_tci, int polling)
{
- struct net_device_stats *stats;
-
- if (skb_bond_should_drop(skb)) {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
-
- skb->dev = vlan_group_get_device(grp, vlan_tag & VLAN_VID_MASK);
- if (skb->dev == NULL) {
- dev_kfree_skb_any(skb);
-
- /* Not NET_RX_DROP, this is not being dropped
- * due to congestion.
- */
- return 0;
- }
-
- skb->dev->last_rx = jiffies;
-
- stats = &skb->dev->stats;
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
-
- skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tag);
- switch (skb->pkt_type) {
- case PACKET_BROADCAST:
- break;
-
- case PACKET_MULTICAST:
- stats->multicast++;
- break;
-
- case PACKET_OTHERHOST:
- /* Our lower layer thinks this is not local, let's make sure.
- * This allows the VLAN to have a different MAC than the underlying
- * device, and still route correctly.
- */
- if (!compare_ether_addr(eth_hdr(skb)->h_dest,
- skb->dev->dev_addr))
- skb->pkt_type = PACKET_HOST;
- break;
- };
-
- return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+ BUG();
+ return NET_XMIT_SUCCESS;
}
+#endif
+/**
+ * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration
+ * @skb: buffer
+ * @grp: vlan group
+ * @vlan_tci: VLAN TCI as received from the card
+ */
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
struct vlan_group *grp,
- unsigned short vlan_tag)
+ u16 vlan_tci)
{
- return __vlan_hwaccel_rx(skb, grp, vlan_tag, 0);
+ return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}
+/**
+ * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration
+ * @skb: buffer
+ * @grp: vlan group
+ * @vlan_tci: VLAN TCI as received from the card
+ */
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
struct vlan_group *grp,
- unsigned short vlan_tag)
+ u16 vlan_tci)
{
- return __vlan_hwaccel_rx(skb, grp, vlan_tag, 1);
+ return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}
/**
* __vlan_put_tag - regular VLAN tag inserting
* @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
* Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
- *
+ *
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*/
-static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
- if (skb_headroom(skb) < VLAN_HLEN) {
- struct sk_buff *sk_tmp = skb;
- skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN);
- kfree_skb(sk_tmp);
- if (!skb) {
- printk(KERN_ERR "vlan: failed to realloc headroom\n");
- return NULL;
- }
- } else {
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- printk(KERN_ERR "vlan: failed to unshare skbuff\n");
- return NULL;
- }
+ if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+ kfree_skb(skb);
+ return NULL;
}
-
veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
/* Move the mac addresses to the beginning of the new header. */
@@ -275,12 +188,10 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short
/* first, the ethernet type */
veth->h_vlan_proto = htons(ETH_P_8021Q);
- /* now, the tag */
- veth->h_vlan_TCI = htons(tag);
+ /* now, the TCI */
+ veth->h_vlan_TCI = htons(vlan_tci);
skb->protocol = htons(ETH_P_8021Q);
- skb->mac_header -= VLAN_HLEN;
- skb->network_header -= VLAN_HLEN;
return skb;
}
@@ -288,18 +199,14 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, unsigned short
/**
* __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
* @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
*
- * Puts the VLAN tag in @skb->cb[] and lets the device do the rest
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
*/
-static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
+ u16 vlan_tci)
{
- struct vlan_skb_tx_cookie *cookie;
-
- cookie = VLAN_TX_SKB_CB(skb);
- cookie->magic = VLAN_TX_COOKIE_MAGIC;
- cookie->vlan_tag = tag;
-
+ skb->vlan_tci = vlan_tci;
return skb;
}
@@ -308,28 +215,28 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, unsign
/**
* vlan_put_tag - inserts VLAN tag according to device features
* @skb: skbuff to tag
- * @tag: VLAN tag to insert
+ * @vlan_tci: VLAN TCI to insert
*
* Assumes skb->dev is the target that will xmit this frame.
* Returns a VLAN tagged skb.
*/
-static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, unsigned short tag)
+static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
- return __vlan_hwaccel_put_tag(skb, tag);
+ return __vlan_hwaccel_put_tag(skb, vlan_tci);
} else {
- return __vlan_put_tag(skb, tag);
+ return __vlan_put_tag(skb, vlan_tci);
}
}
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
- * @tag: buffer to store vlaue
- *
+ * @vlan_tci: buffer to store value
+ *
* Returns error if the skb is not of VLAN type
*/
-static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
@@ -337,29 +244,25 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
return -EINVAL;
}
- *tag = ntohs(veth->h_vlan_TCI);
-
+ *vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
}
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
- * @tag: buffer to store vlaue
- *
- * Returns error if @skb->cb[] is not set correctly
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
- unsigned short *tag)
+ u16 *vlan_tci)
{
- struct vlan_skb_tx_cookie *cookie;
-
- cookie = VLAN_TX_SKB_CB(skb);
- if (cookie->magic == VLAN_TX_COOKIE_MAGIC) {
- *tag = cookie->vlan_tag;
+ if (vlan_tx_tag_present(skb)) {
+ *vlan_tci = skb->vlan_tci;
return 0;
} else {
- *tag = 0;
+ *vlan_tci = 0;
return -EINVAL;
}
}
@@ -369,16 +272,16 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
- * @tag: buffer to store vlaue
- *
+ * @vlan_tci: buffer to store value
+ *
* Returns error if the skb is not VLAN tagged
*/
-static inline int vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
+static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
- return __vlan_hwaccel_get_tag(skb, tag);
+ return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
- return __vlan_get_tag(skb, tag);
+ return __vlan_get_tag(skb, vlan_tci);
}
}
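
For reference, a sketch of splitting the TCI obtained from vlan_get_tag() into its 802.1Q fields; the helper name is invented, and VLAN_VID_MASK is defined elsewhere in this header:

static void example_decode_tci(u16 vlan_tci, u16 *vid, u8 *prio)
{
        *vid  = vlan_tci & VLAN_VID_MASK;       /* bits 0..11: VLAN ID */
        *prio = (vlan_tci >> 13) & 0x7;         /* bits 13..15: 802.1p priority */
}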
@@ -402,6 +305,7 @@ enum vlan_ioctl_cmds {
enum vlan_flags {
VLAN_FLAG_REORDER_HDR = 0x1,
+ VLAN_FLAG_GVRP = 0x2,
};
enum vlan_name_types {
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f5a1a0db2e8..7bb3c095c15 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -228,7 +228,6 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct group_filter __user *optval, int __user *optlen);
extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
-extern void ip_mr_init(void);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
new file mode 100644
index 00000000000..2baace2788a
--- /dev/null
+++ b/include/linux/ihex.h
@@ -0,0 +1,74 @@
+/*
+ * Compact binary representation of ihex records. Some devices need their
+ * firmware loaded in strange orders rather than a single big blob, but
+ * actually parsing ihex-as-text within the kernel seems silly. Thus,...
+ */
+
+#ifndef __LINUX_IHEX_H__
+#define __LINUX_IHEX_H__
+
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+/* Intel HEX files actually limit the length to 256 bytes, but we have
+ drivers which would benefit from using separate records which are
+ longer than that, so we extend to 16 bits of length */
+struct ihex_binrec {
+ __be32 addr;
+ __be16 len;
+ uint8_t data[0];
+} __attribute__((aligned(4)));
+
+/* Find the next record, taking into account the 4-byte alignment */
+static inline const struct ihex_binrec *
+ihex_next_binrec(const struct ihex_binrec *rec)
+{
+ int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
+ rec = (void *)&rec->data[next];
+
+ return be16_to_cpu(rec->len) ? rec : NULL;
+}
+
+/* Check that ihex_next_binrec() won't take us off the end of the image... */
+static inline int ihex_validate_fw(const struct firmware *fw)
+{
+ const struct ihex_binrec *rec;
+ size_t ofs = 0;
+
+ while (ofs <= fw->size - sizeof(*rec)) {
+ rec = (void *)&fw->data[ofs];
+
+ /* Zero length marks end of records */
+ if (!be16_to_cpu(rec->len))
+ return 0;
+
+ /* Point to next record... */
+ ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
+ }
+ return -EINVAL;
+}
+
+/* Request firmware and validate it so that we can trust we won't
+ * run off the end while reading records... */
+static inline int request_ihex_firmware(const struct firmware **fw,
+ const char *fw_name,
+ struct device *dev)
+{
+ const struct firmware *lfw;
+ int ret;
+
+ ret = request_firmware(&lfw, fw_name, dev);
+ if (ret)
+ return ret;
+ ret = ihex_validate_fw(lfw);
+ if (ret) {
+ dev_err(dev, "Firmware \"%s\" not valid IHEX records\n",
+ fw_name);
+ release_firmware(lfw);
+ return ret;
+ }
+ *fw = lfw;
+ return 0;
+}
+#endif /* __LINUX_IHEX_H__ */
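
A sketch of a driver consuming validated records with the helpers above; example_write_block() is a hypothetical stand-in for the device-specific download primitive:

/* hypothetical device-specific download primitive */
static void example_write_block(u32 addr, const u8 *buf, u16 len)
{
}

static int example_load_ihex(struct device *dev, const char *name)
{
        const struct firmware *fw;
        const struct ihex_binrec *rec;
        int ret = request_ihex_firmware(&fw, name, dev);

        if (ret)
                return ret;

        for (rec = (const void *)fw->data; rec; rec = ihex_next_binrec(rec))
                example_write_block(be32_to_cpu(rec->addr), rec->data,
                                    be16_to_cpu(rec->len));

        release_firmware(fw);
        return 0;
}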
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 1354080cf8c..4cca05c9678 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -44,6 +44,13 @@
#include <linux/types.h>
+/*
+ * These mimic similar macros defined in user-space for inet_ntop(3).
+ * See /usr/include/netinet/in.h .
+ */
+#define INET_ADDRSTRLEN (16)
+#define INET6_ADDRSTRLEN (48)
+
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
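
A brief sketch of the parsing side; in4_pton() treats a negative srclen as "use strlen(src)" and a negative delim as "no trailing delimiter", while the new *_ADDRSTRLEN constants size buffers for the text form:

static int example_parse_ipv4(const char *text, __be32 *addr)
{
        /* returns 1 on success, 0 if @text is not a valid IPv4 address */
        return in4_pton(text, -1, (u8 *)addr, -1, NULL);
}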
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9927a88674a..93c45acf249 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -140,8 +140,8 @@ extern struct group_info init_groups;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
- .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
- .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
.parent = &tsk, \
.children = LIST_HEAD_INIT(tsk.children), \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f1fc7470d26..62aa4f895ab 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -104,8 +104,11 @@ extern void enable_irq(unsigned int irq);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+extern cpumask_t irq_default_affinity;
+
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
#else /* CONFIG_SMP */
@@ -119,6 +122,8 @@ static inline int irq_can_set_affinity(unsigned int irq)
return 0;
}
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
@@ -285,12 +290,11 @@ enum
struct softirq_action
{
void (*action)(struct softirq_action *);
- void *data;
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 2b7a1187cb2..08b987bccf8 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -99,4 +99,22 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
return NULL;
}
+#ifdef CONFIG_BLOCK
+int put_io_context(struct io_context *ioc);
+void exit_io_context(void);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+void copy_io_context(struct io_context **pdst, struct io_context **psrc);
+#else
+static inline void exit_io_context(void)
+{
+}
+
+struct io_context;
+static inline int put_io_context(struct io_context *ioc)
+{
+ return 1;
+}
+#endif
+
#endif
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index c6801bffe76..2cd07cc2968 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -59,6 +59,7 @@ struct resource_list {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -88,6 +89,10 @@ struct resource_list {
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+/* PnP I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IO_16BIT_ADDR (1<<0)
+#define IORESOURCE_IO_FIXED (1<<1)
+
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h
index af3f4a70f3d..1e7cc4af40d 100644
--- a/include/linux/ip6_tunnel.h
+++ b/include/linux/ip6_tunnel.h
@@ -1,7 +1,3 @@
-/*
- * $Id$
- */
-
#ifndef _IP6_TUNNEL_H
#define _IP6_TUNNEL_H
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index cde056e0818..391ad0843a4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -163,6 +163,8 @@ struct ipv6_devconf {
#ifdef CONFIG_IPV6_MROUTE
__s32 mc_forwarding;
#endif
+ __s32 disable_ipv6;
+ __s32 accept_dad;
void *sysctl;
};
@@ -194,6 +196,8 @@ enum {
DEVCONF_OPTIMISTIC_DAD,
DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_MC_FORWARDING,
+ DEVCONF_DISABLE_IPV6,
+ DEVCONF_ACCEPT_DAD,
DEVCONF_MAX
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 552e0ec269c..8ccb462ea42 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -244,15 +244,6 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
}
#endif
-#ifdef CONFIG_AUTO_IRQ_AFFINITY
-extern int select_smp_affinity(unsigned int irq);
-#else
-static inline int select_smp_affinity(unsigned int irq)
-{
- return 1;
-}
-#endif
-
extern int no_irq_affinity;
static inline int irq_balancing_disabled(unsigned int irq)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index e600c4e9b8c..2b1c2e58566 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,10 +12,10 @@
#define _LINUX_TRACE_IRQFLAGS_H
#ifdef CONFIG_TRACE_IRQFLAGS
- extern void trace_hardirqs_on(void);
- extern void trace_hardirqs_off(void);
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
+ extern void trace_hardirqs_on(void);
+ extern void trace_hardirqs_off(void);
# define trace_hardirq_context(p) ((p)->hardirq_context)
# define trace_softirq_context(p) ((p)->softirq_context)
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
@@ -41,6 +41,15 @@
# define INIT_TRACE_IRQFLAGS
#endif
+#if defined(CONFIG_IRQSOFF_TRACER) || \
+ defined(CONFIG_PREEMPT_TRACER)
+ extern void stop_critical_timings(void);
+ extern void start_critical_timings(void);
+#else
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#include <asm/irqflags.h>
diff --git a/include/linux/istallion.h b/include/linux/istallion.h
index 5a84fe944b7..0d184072324 100644
--- a/include/linux/istallion.h
+++ b/include/linux/istallion.h
@@ -51,25 +51,21 @@
*/
struct stliport {
unsigned long magic;
+ struct tty_port port;
unsigned int portnr;
unsigned int panelnr;
unsigned int brdnr;
unsigned long state;
unsigned int devnr;
- int flags;
int baud_base;
int custom_divisor;
int close_delay;
int closing_wait;
- int refcount;
int openwaitcnt;
int rc;
int argsize;
void *argp;
unsigned int rxmarkmsk;
- struct tty_struct *tty;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
wait_queue_head_t raw_wait;
struct asysigs asig;
unsigned long addr;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d147f0f9036..3dd20900709 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -168,6 +168,8 @@ struct commit_header {
unsigned char h_chksum_size;
unsigned char h_padding[2];
__be32 h_chksum[JBD2_CHECKSUM_BYTES];
+ __be64 h_commit_sec;
+ __be32 h_commit_nsec;
};
/*
@@ -379,6 +381,38 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
bit_spin_unlock(BH_JournalHead, &bh->b_state);
}
+/* Flags in jbd2_inode->i_flags */
+#define __JI_COMMIT_RUNNING 0
+/* Commit of the inode data is in progress. We use this flag to protect us from
+ * concurrent deletion of the inode. We cannot take a reference to the inode
+ * for this since we cannot afford doing the last iput() on behalf of kjournald
+ */
+#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
+
+/**
+ * struct jbd2_inode is the structure linking inodes in ordered mode
+ * present in a transaction so that we can sync them during commit.
+ */
+struct jbd2_inode {
+ /* Which transaction does this inode belong to? Either the running
+ * transaction or the committing one. [j_list_lock] */
+ transaction_t *i_transaction;
+
+ /* Pointer to the running transaction modifying inode's data in case
+ * there is already a committing transaction touching it. [j_list_lock] */
+ transaction_t *i_next_transaction;
+
+ /* List of inodes in the i_transaction [j_list_lock] */
+ struct list_head i_list;
+
+ /* VFS inode this inode belongs to [constant during the lifetime
+ * of the structure] */
+ struct inode *i_vfs_inode;
+
+ /* Flags of inode [j_list_lock] */
+ unsigned int i_flags;
+};
+
struct jbd2_revoke_table_s;
/**
@@ -509,24 +543,12 @@ struct transaction_s
struct journal_head *t_reserved_list;
/*
- * Doubly-linked circular list of all buffers under writeout during
- * commit [j_list_lock]
- */
- struct journal_head *t_locked_list;
-
- /*
* Doubly-linked circular list of all metadata buffers owned by this
* transaction [j_list_lock]
*/
struct journal_head *t_buffers;
/*
- * Doubly-linked circular list of all data buffers still to be
- * flushed before this transaction can be committed [j_list_lock]
- */
- struct journal_head *t_sync_datalist;
-
- /*
* Doubly-linked circular list of all forget buffers (superseded
* buffers which we can un-checkpoint once this transaction commits)
* [j_list_lock]
@@ -565,6 +587,12 @@ struct transaction_s
struct journal_head *t_log_list;
/*
+ * List of inodes whose data we've modified in data=ordered mode.
+ * [j_list_lock]
+ */
+ struct list_head t_inode_list;
+
+ /*
* Protects info related to handles
*/
spinlock_t t_handle_lock;
@@ -1004,7 +1032,6 @@ extern int jbd2_journal_extend (handle_t *, int nblocks);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern void jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
@@ -1044,6 +1071,10 @@ extern void jbd2_journal_ack_err (journal_t *);
extern int jbd2_journal_clear_err (journal_t *);
extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
extern int jbd2_journal_force_commit(journal_t *);
+extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size);
+extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
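
A rough sketch of how a client filesystem would use the new ordered-mode inode tracking; everything except the jbd2_* calls declared above is hypothetical:

struct example_inode_info {
        struct jbd2_inode jinode;
        struct inode vfs_inode;
};

/* once, when the in-memory inode is set up */
static void example_init_jbd_inode(struct example_inode_info *ei)
{
        jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
}

/* whenever a handle dirties the inode's data in data=ordered mode */
static int example_order_inode_data(handle_t *handle,
                                    struct example_inode_info *ei)
{
        return jbd2_journal_file_inode(handle, &ei->jinode);
}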
/*
* journal_head management
@@ -1179,15 +1210,13 @@ static inline int jbd_space_needed(journal_t *journal)
/* journaling buffer types */
#define BJ_None 0 /* Not journaled */
-#define BJ_SyncData 1 /* Normal data: flush before commit */
-#define BJ_Metadata 2 /* Normal journaled metadata */
-#define BJ_Forget 3 /* Buffer superseded by this transaction */
-#define BJ_IO 4 /* Buffer is for temporary IO use */
-#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
-#define BJ_LogCtl 6 /* Buffer contains log descriptors */
-#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
-#define BJ_Locked 8 /* Locked for I/O during commit */
-#define BJ_Types 9
+#define BJ_Metadata 1 /* Normal journaled metadata */
+#define BJ_Forget 2 /* Buffer superseded by this transaction */
+#define BJ_IO 3 /* Buffer is for temporary IO use */
+#define BJ_Shadow 4 /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl 5 /* Buffer contains log descriptors */
+#define BJ_Reserved 6 /* Buffer is reserved for access by journal */
+#define BJ_Types 7
extern int jbd_blocks_per_page(struct inode *inode);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2e70006c7fa..f9cd7a513f9 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -187,9 +187,6 @@ asmlinkage int vprintk(const char *fmt, va_list args)
__attribute__ ((format (printf, 1, 0)));
asmlinkage int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2))) __cold;
-extern int log_buf_get_len(void);
-extern int log_buf_read(int idx);
-extern int log_buf_copy(char *dest, int idx, int len);
extern int printk_ratelimit_jiffies;
extern int printk_ratelimit_burst;
@@ -205,9 +202,6 @@ static inline int vprintk(const char *s, va_list args) { return 0; }
static inline int printk(const char *s, ...)
__attribute__ ((format (printf, 1, 2)));
static inline int __cold printk(const char *s, ...) { return 0; }
-static inline int log_buf_get_len(void) { return 0; }
-static inline int log_buf_read(int idx) { return 0; }
-static inline int log_buf_copy(char *dest, int idx, int len) { return 0; }
static inline int printk_ratelimit(void) { return 0; }
static inline int __printk_ratelimit(int ratelimit_jiffies, \
int ratelimit_burst) { return 0; }
@@ -216,7 +210,7 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
{ return false; }
#endif
-extern void __attribute__((format(printf, 1, 2)))
+extern void asmlinkage __attribute__((format(printf, 1, 2)))
early_printk(const char *fmt, ...);
unsigned long int_sqrt(unsigned long);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1036631ff4f..04a3556bdea 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -259,6 +259,10 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
struct jprobe;
struct kretprobe;
+static inline struct kprobe *get_kprobe(void *addr)
+{
+ return NULL;
+}
static inline struct kprobe *kprobe_running(void)
{
return NULL;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a281afeddfb..0ea064cbfbc 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -173,6 +173,30 @@ struct kvm_run {
};
};
+/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
+
+struct kvm_coalesced_mmio_zone {
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+};
+
+struct kvm_coalesced_mmio {
+ __u64 phys_addr;
+ __u32 len;
+ __u32 pad;
+ __u8 data[8];
+};
+
+struct kvm_coalesced_mmio_ring {
+ __u32 first, last;
+ struct kvm_coalesced_mmio coalesced_mmio[0];
+};
+
+#define KVM_COALESCED_MMIO_MAX \
+ ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
+ sizeof(struct kvm_coalesced_mmio))
+
/* for KVM_TRANSLATE */
struct kvm_translation {
/* in */
@@ -294,14 +318,14 @@ struct kvm_trace_rec {
__u32 vcpu_id;
union {
struct {
- __u32 cycle_lo, cycle_hi;
+ __u64 cycle_u64;
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
} cycle;
struct {
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
} nocycle;
} u;
-};
+} __attribute__((packed));
#define KVMIO 0xAE
@@ -346,6 +370,7 @@ struct kvm_trace_rec {
#define KVM_CAP_NOP_IO_DELAY 12
#define KVM_CAP_PV_MMU 13
#define KVM_CAP_MP_STATE 14
+#define KVM_CAP_COALESCED_MMIO 15
/*
* ioctls for VM fds
@@ -371,6 +396,10 @@ struct kvm_trace_rec {
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
+#define KVM_REGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
+#define KVM_UNREGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
/*
* ioctls for vcpu fds
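KVM_REGISTER_COALESCED_MMIO lets userspace mark guest-physical regions whose writes the kernel may batch into kvm_coalesced_mmio_ring instead of exiting on every access; userspace drains the ring when it next regains control. A hedged userspace sketch, assuming KVM_CAP_COALESCED_MMIO has been confirmed via KVM_CHECK_EXTENSION, that the ring page sits KVM_COALESCED_MMIO_PAGE_OFFSET pages into the vcpu mmap on architectures defining it, and that the MMIO address and the handle_mmio_write() helper are made up:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical consumer of a coalesced write, provided by the VMM. */
void handle_mmio_write(uint64_t addr, const uint8_t *data, uint32_t len);

/* Ask the kernel to coalesce writes to an assumed framebuffer window. */
static int enable_fb_coalescing(int vm_fd)
{
        struct kvm_coalesced_mmio_zone zone = {
                .addr = 0xa0000,        /* assumed guest-physical base */
                .size = 0x10000,
        };

        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/*
 * Drain the ring after a vcpu exit; 'ring' points into the vcpu mmap,
 * KVM_COALESCED_MMIO_PAGE_OFFSET pages from its start.
 */
static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
{
        unsigned int max = (getpagesize() - sizeof(*ring)) /
                           sizeof(ring->coalesced_mmio[0]);

        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *m =
                        &ring->coalesced_mmio[ring->first];

                handle_mmio_write(m->phys_addr, m->data, m->len);
                ring->first = (ring->first + 1) % max;
        }
}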
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index de9d1df4bba..07d68a8ae8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -52,7 +52,8 @@ struct kvm_io_bus {
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
+ gpa_t addr, int len, int is_write);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
struct kvm_io_device *dev);
@@ -116,6 +117,10 @@ struct kvm {
struct kvm_vm_stat stat;
struct kvm_arch arch;
atomic_t users_count;
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+#endif
};
/* The guest did something we don't support. */
@@ -135,9 +140,6 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-void decache_vcpus_on_cpu(int cpu);
-
-
int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module);
void kvm_exit(void);
@@ -166,6 +168,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
+void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e57e5d08312..5b247b8a6b3 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -27,6 +27,7 @@
#define __LINUX_LIBATA_H__
#include <linux/delay.h>
+#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
@@ -115,7 +116,7 @@ enum {
/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
- ATA_SHORT_PAUSE = (HZ >> 6) + 1,
+ ATA_SHORT_PAUSE = 16,
ATAPI_MAX_DRAIN = 16 << 10,
@@ -168,6 +169,7 @@ enum {
ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
+ ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -190,6 +192,10 @@ enum {
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
+ ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
+ * management */
+ ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
+ * led */
/* The following flag belongs to ap->pflags but is kept in
* ap->flags because it's referenced in many LLDs and will be
@@ -234,17 +240,16 @@ enum {
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
- ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
- ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
- ATA_TMOUT_INTERNAL = 30 * HZ,
- ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
+ ATA_TMOUT_BOOT = 30000, /* heuristic */
+ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ ATA_TMOUT_INTERNAL_QUICK = 5000,
/* FIXME: GoVault needs 2s but we can't afford that without
* parallel probing. 800ms is enough for iVDR disk
* HHD424020F7SV00. Increase to 2secs when parallel probing
* is in place.
*/
- ATA_TMOUT_FF_WAIT = 4 * HZ / 5,
+ ATA_TMOUT_FF_WAIT = 800,
/* Spec mandates to wait for ">= 2ms" before checking status
* after reset. We wait 150ms, because that was the magic
@@ -256,14 +261,14 @@ enum {
*
* Old drivers/ide uses the 2mS rule and then waits for ready.
*/
- ATA_WAIT_AFTER_RESET_MSECS = 150,
+ ATA_WAIT_AFTER_RESET = 150,
/* If PMP is supported, we have to do follow-up SRST. As some
* PMPs don't send D2H Reg FIS after hardreset, LLDs are
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 1 * HZ,
+ ATA_TMOUT_PMP_SRST_WAIT = 1000,
/* ATA bus states */
BUS_UNKNOWN = 0,
@@ -340,6 +345,11 @@ enum {
SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */
+ /* This should match the actual table size of
+ * ata_eh_cmd_timeout_table in libata-eh.c.
+ */
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 5,
+
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependant */
@@ -441,6 +451,15 @@ enum link_pm {
MEDIUM_POWER,
};
extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_em_message_type;
+extern struct device_attribute dev_attr_em_message;
+extern struct device_attribute dev_attr_sw_activity;
+
+enum sw_activity {
+ OFF,
+ BLINK_ON,
+ BLINK_OFF,
+};
#ifdef CONFIG_ATA_SFF
struct ata_ioports {
@@ -597,10 +616,14 @@ struct ata_eh_info {
struct ata_eh_context {
struct ata_eh_info i;
int tries[ATA_MAX_DEVICES];
+ int cmd_timeout_idx[ATA_MAX_DEVICES]
+ [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
unsigned int classes[ATA_MAX_DEVICES];
unsigned int did_probe_mask;
unsigned int saved_ncq_enabled;
u8 saved_xfer_mode[ATA_MAX_DEVICES];
+ /* timestamp for the last reset attempt or success */
+ unsigned long last_reset;
};
struct ata_acpi_drive
@@ -692,6 +715,7 @@ struct ata_port {
struct timer_list fastdrain_timer;
unsigned long fastdrain_cnt;
+ int em_message_type;
void *private_data;
#ifdef CONFIG_ATA_ACPI
@@ -783,6 +807,12 @@ struct ata_port_operations {
u8 (*bmdma_status)(struct ata_port *ap);
#endif /* CONFIG_ATA_SFF */
+ ssize_t (*em_show)(struct ata_port *ap, char *buf);
+ ssize_t (*em_store)(struct ata_port *ap, const char *message,
+ size_t size);
+ ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
+ ssize_t (*sw_activity_store)(struct ata_device *dev,
+ enum sw_activity val);
/*
* Obsolete
*/
@@ -895,8 +925,7 @@ extern void ata_host_resume(struct ata_host *host);
#endif
extern int ata_ratelimit(void);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
- unsigned long interval_msec,
- unsigned long timeout_msec);
+ unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1389,6 +1418,12 @@ static inline int ata_check_ready(u8 status)
return 0;
}
+static inline unsigned long ata_deadline(unsigned long from_jiffies,
+ unsigned long timeout_msecs)
+{
+ return from_jiffies + msecs_to_jiffies(timeout_msecs);
+}
+
/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
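With the ATA_TMOUT_* constants now expressed in milliseconds rather than jiffies, drivers convert at the point of use; ata_deadline() combined with the usual time_before()/time_after() checks is the intended pattern. A small sketch of a status-poll loop, not taken from any real LLD:

#include <linux/libata.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/*
 * Poll an SFF-style status register until BSY clears or the timeout
 * expires.  ATA_TMOUT_FF_WAIT is plain milliseconds now, so it goes
 * through ata_deadline() instead of being added to jiffies directly.
 */
static int my_wait_not_busy(struct ata_port *ap)
{
        unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_FF_WAIT);

        while (time_before(jiffies, deadline)) {
                u8 status = ioread8(ap->ioaddr.status_addr);

                if (!(status & ATA_BUSY))
                        return 0;
                msleep(10);
        }

        return -EBUSY;
}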
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 9fd1f859021..56ba3739465 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -4,6 +4,8 @@
#include <linux/compiler.h>
#include <asm/linkage.h>
+#define notrace __attribute__((no_instrument_function))
+
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
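notrace marks functions that the function tracer must never instrument, typically helpers called from tracing code itself, which would otherwise recurse through mcount. A one-line illustration with a made-up helper:

#include <linux/linkage.h>
#include <linux/jiffies.h>

/*
 * Made-up helper that tracing code could call without recursing:
 * notrace keeps mcount/ftrace instrumentation out of it.
 */
static notrace unsigned long my_trace_safe_timestamp(void)
{
        return jiffies;
}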
diff --git a/include/linux/list.h b/include/linux/list.h
index 08cf4f65188..139ec41d9c2 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -85,65 +85,6 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
}
/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __list_add_rcu(struct list_head * new,
- struct list_head * prev, struct list_head * next)
-{
- new->next = next;
- new->prev = prev;
- smp_wmb();
- next->prev = new;
- prev->next = new;
-}
-
-/**
- * list_add_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_rcu(struct list_head *new, struct list_head *head)
-{
- __list_add_rcu(new, head, head->next);
-}
-
-/**
- * list_add_tail_rcu - add a new entry to rcu-protected list
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_add_tail_rcu()
- * or list_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- */
-static inline void list_add_tail_rcu(struct list_head *new,
- struct list_head *head)
-{
- __list_add_rcu(new, head->prev, head);
-}
-
-/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
@@ -174,36 +115,6 @@ extern void list_del(struct list_head *entry);
#endif
/**
- * list_del_rcu - deletes entry from list without re-initialization
- * @entry: the element to delete from the list.
- *
- * Note: list_empty() on entry does not return true after this,
- * the entry is in an undefined state. It is useful for RCU based
- * lockfree traversal.
- *
- * In particular, it means that we can not poison the forward
- * pointers that may still be used for walking the list.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as list_del_rcu()
- * or list_add_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * list_for_each_entry_rcu().
- *
- * Note that the caller is not permitted to immediately free
- * the newly deleted entry. Instead, either synchronize_rcu()
- * or call_rcu() must be used to defer freeing until an RCU
- * grace period has elapsed.
- */
-static inline void list_del_rcu(struct list_head *entry)
-{
- __list_del(entry->prev, entry->next);
- entry->prev = LIST_POISON2;
-}
-
-/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
@@ -227,25 +138,6 @@ static inline void list_replace_init(struct list_head *old,
}
/**
- * list_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- * Note: @old should not be empty.
- */
-static inline void list_replace_rcu(struct list_head *old,
- struct list_head *new)
-{
- new->next = old->next;
- new->prev = old->prev;
- smp_wmb();
- new->next->prev = new;
- new->prev->next = new;
- old->prev = LIST_POISON2;
-}
-
-/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
@@ -369,62 +261,6 @@ static inline void list_splice_init(struct list_head *list,
}
/**
- * list_splice_init_rcu - splice an RCU-protected list into an existing list.
- * @list: the RCU-protected list to splice
- * @head: the place in the list to splice the first list into
- * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
- *
- * @head can be RCU-read traversed concurrently with this function.
- *
- * Note that this function blocks.
- *
- * Important note: the caller must take whatever action is necessary to
- * prevent any other updates to @head. In principle, it is possible
- * to modify the list as soon as sync() begins execution.
- * If this sort of thing becomes necessary, an alternative version
- * based on call_rcu() could be created. But only if -really-
- * needed -- there is no shortage of RCU API members.
- */
-static inline void list_splice_init_rcu(struct list_head *list,
- struct list_head *head,
- void (*sync)(void))
-{
- struct list_head *first = list->next;
- struct list_head *last = list->prev;
- struct list_head *at = head->next;
-
- if (list_empty(head))
- return;
-
- /* "first" and "last" tracking list, so initialize it. */
-
- INIT_LIST_HEAD(list);
-
- /*
- * At this point, the list body still points to the source list.
- * Wait for any readers to finish using the list before splicing
- * the list body into the new list. Any new readers will see
- * an empty list.
- */
-
- sync();
-
- /*
- * Readers are finished with the source list, so perform splice.
- * The order is important if the new list is global and accessible
- * to concurrent RCU readers. Note that RCU readers are not
- * permitted to traverse the prev pointers without excluding
- * this function.
- */
-
- last->next = at;
- smp_wmb();
- head->next = first;
- first->prev = head;
- at->prev = last;
-}
-
-/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
@@ -629,57 +465,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->next); \
- prefetch(pos->next), pos != (head); \
- pos = rcu_dereference(pos->next))
-
-#define __list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->next); \
- pos != (head); \
- pos = rcu_dereference(pos->next))
-
-/**
- * list_for_each_entry_rcu - iterate over rcu list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
-
-
-/**
- * list_for_each_continue_rcu
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * Iterate over an rcu-protected list, continuing after current point.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_continue_rcu(pos, head) \
- for ((pos) = rcu_dereference((pos)->next); \
- prefetch((pos)->next), (pos) != (head); \
- (pos) = rcu_dereference((pos)->next))
-
/*
* Double linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
@@ -730,31 +515,6 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
-/**
- * hlist_del_rcu - deletes entry from hash list without re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: list_unhashed() on entry does not return true after this,
- * the entry is in an undefined state. It is useful for RCU based
- * lockfree traversal.
- *
- * In particular, it means that we can not poison the forward
- * pointers that may still be used for walking the hash list.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry().
- */
-static inline void hlist_del_rcu(struct hlist_node *n)
-{
- __hlist_del(n);
- n->pprev = LIST_POISON2;
-}
-
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -763,27 +523,6 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
-/**
- * hlist_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- */
-static inline void hlist_replace_rcu(struct hlist_node *old,
- struct hlist_node *new)
-{
- struct hlist_node *next = old->next;
-
- new->next = next;
- new->pprev = old->pprev;
- smp_wmb();
- if (next)
- new->next->pprev = &new->next;
- *new->pprev = new;
- old->pprev = LIST_POISON2;
-}
-
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
@@ -794,38 +533,6 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
n->pprev = &h->first;
}
-
-/**
- * hlist_add_head_rcu
- * @n: the element to add to the hash list.
- * @h: the list to add to.
- *
- * Description:
- * Adds the specified element to the specified hlist,
- * while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs. Regardless of the type of CPU, the
- * list-traversal primitive must be guarded by rcu_read_lock().
- */
-static inline void hlist_add_head_rcu(struct hlist_node *n,
- struct hlist_head *h)
-{
- struct hlist_node *first = h->first;
- n->next = first;
- n->pprev = &h->first;
- smp_wmb();
- if (first)
- first->pprev = &n->next;
- h->first = n;
-}
-
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
@@ -847,63 +554,6 @@ static inline void hlist_add_after(struct hlist_node *n,
next->next->pprev = &next->next;
}
-/**
- * hlist_add_before_rcu
- * @n: the new element to add to the hash list.
- * @next: the existing element to add the new element before.
- *
- * Description:
- * Adds the specified element to the specified hlist
- * before the specified node while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.
- */
-static inline void hlist_add_before_rcu(struct hlist_node *n,
- struct hlist_node *next)
-{
- n->pprev = next->pprev;
- n->next = next;
- smp_wmb();
- next->pprev = &n->next;
- *(n->pprev) = n;
-}
-
-/**
- * hlist_add_after_rcu
- * @prev: the existing element to add the new element after.
- * @n: the new element to add to the hash list.
- *
- * Description:
- * Adds the specified element to the specified hlist
- * after the specified node while permitting racing traversals.
- *
- * The caller must take whatever precautions are necessary
- * (such as holding appropriate locks) to avoid racing
- * with another list-mutation primitive, such as hlist_add_head_rcu()
- * or hlist_del_rcu(), running on this same list.
- * However, it is perfectly legal to run concurrently with
- * the _rcu list-traversal primitives, such as
- * hlist_for_each_entry_rcu(), used to prevent memory-consistency
- * problems on Alpha CPUs.
- */
-static inline void hlist_add_after_rcu(struct hlist_node *prev,
- struct hlist_node *n)
-{
- n->next = prev->next;
- n->pprev = &prev->next;
- smp_wmb();
- prev->next = n;
- if (n->next)
- n->next->pprev = &n->next;
-}
-
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
@@ -964,21 +614,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
-/**
- * hlist_for_each_entry_rcu - iterate over rcu list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the hlist_node within the struct.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as hlist_add_head_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
- for (pos = rcu_dereference((head)->first); \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = rcu_dereference(pos->next))
-
#endif
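All of the *_rcu mutation primitives and traversal macros are dropped from list.h here; callers are expected to pull them from a dedicated RCU list header instead (presumably <linux/rculist.h>). Call sites themselves do not change, for example:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct item {
        int key;
        struct list_head node;
};

/*
 * Lockless lookup: readers only need rcu_read_lock(); writers still
 * serialize list_add_rcu()/list_del_rcu() against each other with
 * their own lock.
 */
static bool item_present(struct list_head *head, int key)
{
        struct item *it;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(it, head, node) {
                if (it->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}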
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h
index f274997bc28..2ed8fa1b762 100644
--- a/include/linux/lm_interface.h
+++ b/include/linux/lm_interface.h
@@ -122,11 +122,9 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
*/
#define LM_OUT_ST_MASK 0x00000003
-#define LM_OUT_CACHEABLE 0x00000004
#define LM_OUT_CANCELED 0x00000008
#define LM_OUT_ASYNC 0x00000080
#define LM_OUT_ERROR 0x00000100
-#define LM_OUT_CONV_DEADLK 0x00000200
/*
* lm_callback_t types
@@ -138,9 +136,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
* LM_CB_NEED_RECOVERY
* The given journal needs to be recovered.
*
- * LM_CB_DROPLOCKS
- * Reduce the number of cached locks.
- *
* LM_CB_ASYNC
* The given lock has been granted.
*/
@@ -149,7 +144,6 @@ typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
#define LM_CB_NEED_D 258
#define LM_CB_NEED_S 259
#define LM_CB_NEED_RECOVERY 260
-#define LM_CB_DROPLOCKS 261
#define LM_CB_ASYNC 262
/*
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 102d928f720..dbb87ab282e 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -200,10 +200,12 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
* Server-side lock handling
*/
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
- struct nlm_lock *, int, struct nlm_cookie *);
+ struct nlm_host *, struct nlm_lock *, int,
+ struct nlm_cookie *);
__be32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
- struct nlm_lock *, struct nlm_lock *, struct nlm_cookie *);
+ struct nlm_host *, struct nlm_lock *,
+ struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
unsigned long nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
@@ -224,7 +226,7 @@ void nlmsvc_invalidate_all(void);
* Cluster failover support
*/
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
-int nlmsvc_unlock_all_by_ip(__be32 server_addr);
+int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4c4d236ded1..2486eb4edbf 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -182,6 +182,9 @@ struct lock_list {
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
+ u8 irq_context;
+ u8 depth;
+ u16 base;
struct list_head entry;
u64 chain_key;
};
@@ -276,14 +279,6 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.key, sub)
/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
-
-
-/*
* Acquire a lock.
*
* Values for "read":
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 430f6adf976..1290653f924 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -44,8 +44,8 @@ struct marker {
*/
char state; /* Marker state. */
char ptype; /* probe type : 0 : single, 1 : multi */
- void (*call)(const struct marker *mdata, /* Probe wrapper */
- void *call_private, const char *fmt, ...);
+ /* Probe wrapper */
+ void (*call)(const struct marker *mdata, void *call_private, ...);
struct marker_probe_closure single;
struct marker_probe_closure *multi;
} __attribute__((aligned(8)));
@@ -58,8 +58,12 @@ struct marker {
* Make sure the alignment of the structure in the __markers section will
* not add unwanted padding between the beginning of the section and the
* structure. Force alignment to the same alignment as the section start.
+ *
+ * The "generic" argument controls which marker enabling mechanism must be used.
+ * If generic is true, a variable read is used.
+ * If generic is false, immediate values are used.
*/
-#define __trace_mark(name, call_private, format, args...) \
+#define __trace_mark(generic, name, call_private, format, args...) \
do { \
static const char __mstrtab_##name[] \
__attribute__((section("__markers_strings"))) \
@@ -72,15 +76,14 @@ struct marker {
__mark_check_format(format, ## args); \
if (unlikely(__mark_##name.state)) { \
(*__mark_##name.call) \
- (&__mark_##name, call_private, \
- format, ## args); \
+ (&__mark_##name, call_private, ## args);\
} \
} while (0)
extern void marker_update_probe_range(struct marker *begin,
struct marker *end);
#else /* !CONFIG_MARKERS */
-#define __trace_mark(name, call_private, format, args...) \
+#define __trace_mark(generic, name, call_private, format, args...) \
__mark_check_format(format, ## args)
static inline void marker_update_probe_range(struct marker *begin,
struct marker *end)
@@ -88,15 +91,30 @@ static inline void marker_update_probe_range(struct marker *begin,
#endif /* CONFIG_MARKERS */
/**
- * trace_mark - Marker
+ * trace_mark - Marker using code patching
* @name: marker name, not quoted.
* @format: format string
* @args...: variable argument list
*
- * Places a marker.
+ * Places a marker that is enabled through an optimized code patching
+ * technique (imv_read()) when immediate values are available.
*/
#define trace_mark(name, format, args...) \
- __trace_mark(name, NULL, format, ## args)
+ __trace_mark(0, name, NULL, format, ## args)
+
+/**
+ * _trace_mark - Marker using variable read
+ * @name: marker name, not quoted.
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker that is enabled through a standard memory read
+ * (_imv_read()). Use it for markers in code paths where instruction
+ * modification based enabling is not welcome (__init and __exit functions,
+ * lockdep, some traps, printk).
+ */
+#define _trace_mark(name, format, args...) \
+ __trace_mark(1, name, NULL, format, ## args)
/**
* MARK_NOARGS - Format string for a marker with no argument.
@@ -117,9 +135,9 @@ static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
extern marker_probe_func __mark_empty_function;
extern void marker_probe_cb(const struct marker *mdata,
- void *call_private, const char *fmt, ...);
+ void *call_private, ...);
extern void marker_probe_cb_noarg(const struct marker *mdata,
- void *call_private, const char *fmt, ...);
+ void *call_private, ...);
/*
* Connect a probe to a marker.
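trace_mark() now uses the immediate-values code-patching mechanism for its enable check, while the new _trace_mark() keeps a plain memory read for contexts where code patching is unwelcome. Call sites look identical either way; a sketch with made-up marker names:

#include <linux/marker.h>
#include <linux/init.h>

/* Hot path: the enable check uses code patching (imv_read()). */
static void myfs_submit_request(int cpu, unsigned long nr_bytes)
{
        trace_mark(myfs_request_submit, "cpu %d bytes %lu", cpu, nr_bytes);
}

/* __init code: use the variable-read variant instead. */
static void __init myfs_mark_init_done(void)
{
        _trace_mark(myfs_init_done, MARK_NOARGS);
}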
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index 4ab2162db13..322cd6deb9f 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*
* Copyright 2001 Compaq Computer Corporation.
- * Copyright 2007 OpendHand.
+ * Copyright 2007-2008 OpenedHand Ltd.
*/
#ifndef __ASIC3_H__
@@ -16,43 +16,22 @@
#include <linux/types.h>
-struct asic3 {
- void __iomem *mapping;
- unsigned int bus_shift;
- unsigned int irq_nr;
- unsigned int irq_base;
- spinlock_t lock;
- u16 irq_bothedge[4];
- struct device *dev;
-};
-
struct asic3_platform_data {
- struct {
- u32 dir;
- u32 init;
- u32 sleep_mask;
- u32 sleep_out;
- u32 batt_fault_out;
- u32 sleep_conf;
- u32 alt_function;
- } gpio_a, gpio_b, gpio_c, gpio_d;
-
- unsigned int bus_shift;
+ u16 *gpio_config;
+ unsigned int gpio_config_num;
unsigned int irq_base;
- struct platform_device **children;
- unsigned int n_children;
+ unsigned int gpio_base;
};
-int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio);
-void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
-
#define ASIC3_NUM_GPIO_BANKS 4
#define ASIC3_GPIOS_PER_BANK 16
#define ASIC3_NUM_GPIOS 64
#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6
+#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
+
#define ASIC3_GPIO_BANK_A 0
#define ASIC3_GPIO_BANK_B 1
#define ASIC3_GPIO_BANK_C 2
@@ -64,32 +43,89 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
/* All offsets below are specified with this address bus shift */
#define ASIC3_DEFAULT_ADDR_SHIFT 2
-#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_Base + ASIC3_##base##_##reg)
+#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
#define ASIC3_GPIO_OFFSET(base, reg) \
- (ASIC3_GPIO_##base##_Base + ASIC3_GPIO_##reg)
-
-#define ASIC3_GPIO_A_Base 0x0000
-#define ASIC3_GPIO_B_Base 0x0100
-#define ASIC3_GPIO_C_Base 0x0200
-#define ASIC3_GPIO_D_Base 0x0300
-
-#define ASIC3_GPIO_Mask 0x00 /* R/W 0:don't mask */
-#define ASIC3_GPIO_Direction 0x04 /* R/W 0:input */
-#define ASIC3_GPIO_Out 0x08 /* R/W 0:output low */
-#define ASIC3_GPIO_TriggerType 0x0c /* R/W 0:level */
-#define ASIC3_GPIO_EdgeTrigger 0x10 /* R/W 0:falling */
-#define ASIC3_GPIO_LevelTrigger 0x14 /* R/W 0:low level detect */
-#define ASIC3_GPIO_SleepMask 0x18 /* R/W 0:don't mask in sleep mode */
-#define ASIC3_GPIO_SleepOut 0x1c /* R/W level 0:low in sleep mode */
-#define ASIC3_GPIO_BattFaultOut 0x20 /* R/W level 0:low in batt_fault */
-#define ASIC3_GPIO_IntStatus 0x24 /* R/W 0:none, 1:detect */
-#define ASIC3_GPIO_AltFunction 0x28 /* R/W 1:LED register control */
-#define ASIC3_GPIO_SleepConf 0x2c /*
+ (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
+
+#define ASIC3_GPIO_A_BASE 0x0000
+#define ASIC3_GPIO_B_BASE 0x0100
+#define ASIC3_GPIO_C_BASE 0x0200
+#define ASIC3_GPIO_D_BASE 0x0300
+
+#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
+#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
+ (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
+#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
+#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
+#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
+
+#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
+#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
+#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
+#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
+#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
+#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
+#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
+#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
+#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
+#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
+#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
+#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
* R/W bit 1: autosleep
* 0: disable gposlpout in normal mode,
* enable gposlpout in sleep mode.
*/
-#define ASIC3_GPIO_Status 0x30 /* R Pin status */
+#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
+
+/*
+ * ASIC3 GPIO config
+ *
+ * Bits 0..6 gpio number
+ * Bits 7..13 Alternate function
+ * Bit 14 Direction
+ * Bit 15 Initial value
+ *
+ */
+#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
+#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
+#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14)
+#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15)
+#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
+ | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
+ | (((init) & 0x1) << 15))
+#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
+ ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
+#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
+ ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
+
+/*
+ * Alternate functions
+ */
+#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
+#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
+#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
+#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 1, 0)
+#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 1, 0)
+#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 1, 0)
+#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
+#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
+#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
+#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
+#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
+#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
+#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
+#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
+#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
+#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
+#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
+#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
+#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
+#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
+#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
+#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
+#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
+#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
+
#define ASIC3_SPI_Base 0x0400
#define ASIC3_SPI_Control 0x0000
@@ -128,7 +164,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
-#define ASIC3_CLOCK_Base 0x0A00
+#define ASIC3_CLOCK_BASE 0x0A00
#define ASIC3_CLOCK_CDEX 0x00
#define ASIC3_CLOCK_SEL 0x04
@@ -159,12 +195,12 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
#define CLOCK_SEL_CX (1 << 2)
-#define ASIC3_INTR_Base 0x0B00
+#define ASIC3_INTR_BASE 0x0B00
-#define ASIC3_INTR_IntMask 0x00 /* Interrupt mask control */
-#define ASIC3_INTR_PIntStat 0x04 /* Peripheral interrupt status */
-#define ASIC3_INTR_IntCPS 0x08 /* Interrupt timer clock pre-scale */
-#define ASIC3_INTR_IntTBS 0x0c /* Interrupt timer set */
+#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
+#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
+#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
+#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
@@ -227,44 +263,12 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
/*********************************************
- * The Onewire interface registers
- *
- * OWM_CMD
- * OWM_DAT
- * OWM_INTR
- * OWM_INTEN
- * OWM_CLKDIV
+ * The Onewire interface (DS1WM) is handled
+ * by the ds1wm driver.
*
*********************************************/
-#define ASIC3_OWM_Base 0xC00
-
-#define ASIC3_OWM_CMD 0x00
-#define ASIC3_OWM_DAT 0x04
-#define ASIC3_OWM_INTR 0x08
-#define ASIC3_OWM_INTEN 0x0C
-#define ASIC3_OWM_CLKDIV 0x10
-
-#define ASIC3_OWM_CMD_ONEWR (1 << 0)
-#define ASIC3_OWM_CMD_SRA (1 << 1)
-#define ASIC3_OWM_CMD_DQO (1 << 2)
-#define ASIC3_OWM_CMD_DQI (1 << 3)
-
-#define ASIC3_OWM_INTR_PD (1 << 0)
-#define ASIC3_OWM_INTR_PDR (1 << 1)
-#define ASIC3_OWM_INTR_TBE (1 << 2)
-#define ASIC3_OWM_INTR_TEMP (1 << 3)
-#define ASIC3_OWM_INTR_RBF (1 << 4)
-
-#define ASIC3_OWM_INTEN_EPD (1 << 0)
-#define ASIC3_OWM_INTEN_IAS (1 << 1)
-#define ASIC3_OWM_INTEN_ETBE (1 << 2)
-#define ASIC3_OWM_INTEN_ETMT (1 << 3)
-#define ASIC3_OWM_INTEN_ERBF (1 << 4)
-
-#define ASIC3_OWM_CLKDIV_PRE (3 << 0) /* two bits wide at bit 0 */
-#define ASIC3_OWM_CLKDIV_DIV (7 << 2) /* 3 bits wide at bit 2 */
-
+#define ASIC3_OWM_BASE 0xC00
/*****************************************************************************
* The SD configuration registers are at a completely different location
@@ -492,6 +496,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
#define ASIC3_SDIO_CTRL_LEDCtrl 0x7C
#define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0
-#define ASIC3_MAP_SIZE 0x2000
+#define ASIC3_MAP_SIZE_32BIT 0x2000
+#define ASIC3_MAP_SIZE_16BIT 0x1000
#endif /* __ASIC3_H__ */
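Board code now describes ASIC3 pin setup as a flat table of packed u16 words (bits 0..6 pin number, 7..13 alternate function, 14 direction, 15 initial level) passed through asic3_platform_data, replacing the old per-bank structs. A sketch of what a machine file might pass; the pin choices, IRQ base and GPIO base are placeholders:

#include <linux/mfd/asic3.h>
#include <linux/kernel.h>

static u16 myboard_asic3_gpio_config[] = {
        ASIC3_GPIOA11_PWM0,                     /* backlight PWM */
        ASIC3_GPIOC0_LED0,                      /* notification LED */
        ASIC3_CONFIG_GPIO_DEFAULT_OUT(20, 1),   /* plain output, init high */
        ASIC3_CONFIG_GPIO_DEFAULT(21, 0, 0),    /* plain input */
};

static struct asic3_platform_data myboard_asic3_pdata = {
        .gpio_config     = myboard_asic3_gpio_config,
        .gpio_config_num = ARRAY_SIZE(myboard_asic3_gpio_config),
        .irq_base        = 400,                 /* placeholder IRQ range */
        .gpio_base       = 160,                 /* placeholder GPIO base */
};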
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a744383d16e..81b3dd5206e 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -398,7 +398,8 @@ int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_waterm
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cf1cd3a2ed7..2128ef7780c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -108,6 +108,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dab8892e6ff..30d1073bac3 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -34,6 +34,32 @@ static inline void vm_unacct_memory(long pages)
}
/*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+ return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
+/*
* Optimisation macro. It is equivalent to:
* (x & bit1) ? bit2 : 0
* but this version is faster.
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
_calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
- _calc_vm_trans(prot, PROT_EXEC, VM_EXEC );
+ _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
+ arch_calc_vm_prot_bits(prot);
}
/*
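These hooks let an architecture map extra PROT_* bits onto VM_* flags and reject unknown bits in mprotect(); the VM_SAO flag added to mm.h above is the sort of user they are meant for. An illustrative <asm/mman.h> fragment, with the PROT_SAO value and the exact policy invented for the example:

/* Illustrative <asm/mman.h> fragment wiring up one extra protection bit. */
#define PROT_SAO        0x10    /* invented value for an arch-specific bit */

#define arch_calc_vm_prot_bits(prot)    ((prot) & PROT_SAO ? VM_SAO : 0)

static inline int arch_validate_prot(unsigned long prot)
{
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
                return 0;
        return 1;
}
#define arch_validate_prot arch_validate_prot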
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index d0c3abed74c..143cebf0586 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -135,6 +135,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
+extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7ab962fa1d7..10a2080086c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -51,8 +51,30 @@ struct mmc_ios {
struct mmc_host_ops {
void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /*
+ * Avoid calling these three functions too often or in a "fast path",
+ * since the underlying controller might implement them in an expensive
+ * and/or slow way.
+ *
+ * Also note that these functions might sleep, so don't call them
+ * in atomic context!
+ *
+ * Return values for the get_ro callback should be:
+ * 0 for a read/write card
+ * 1 for a read-only card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ *
+ * Return values for the get_cd callback should be:
+ * 0 for an absent card
+ * 1 for a present card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ */
void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
int (*get_ro)(struct mmc_host *host);
+ int (*get_cd)(struct mmc_host *host);
+
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
};
@@ -89,11 +111,11 @@ struct mmc_host {
unsigned long caps; /* Host capabilities */
#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
-#define MMC_CAP_MULTIWRITE (1 << 1) /* Can accurately report bytes sent to card on error */
-#define MMC_CAP_MMC_HIGHSPEED (1 << 2) /* Can do MMC high-speed timing */
-#define MMC_CAP_SD_HIGHSPEED (1 << 3) /* Can do SD high-speed timing */
-#define MMC_CAP_SDIO_IRQ (1 << 4) /* Can signal pending SDIO IRQs */
-#define MMC_CAP_SPI (1 << 5) /* Talks only SPI protocols */
+#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */
+#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */
+#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
+#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
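get_cd() complements get_ro(): both report slot state, both may sleep, and a host without a card-detect interrupt can set MMC_CAP_NEEDS_POLL so the core polls get_cd() periodically. A hedged sketch of a GPIO-based implementation; the GPIO numbers and polarities are assumptions:

#include <linux/mmc/host.h>
#include <linux/gpio.h>

#define MYHOST_GPIO_CD  10      /* assumed card-detect GPIO, active low */
#define MYHOST_GPIO_WP  11      /* assumed write-protect GPIO, active high */

static int myhost_get_cd(struct mmc_host *mmc)
{
        return !gpio_get_value(MYHOST_GPIO_CD); /* 1 = card present */
}

static int myhost_get_ro(struct mmc_host *mmc)
{
        return gpio_get_value(MYHOST_GPIO_WP);  /* 1 = write protected */
}

/*
 * request/set_ios omitted; a driver without a card-detect interrupt
 * would also set mmc->caps |= MMC_CAP_NEEDS_POLL in its probe routine.
 */
static const struct mmc_host_ops myhost_ops = {
        .get_ro = myhost_get_ro,
        .get_cd = myhost_get_cd,
};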
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 4236fbf0b6f..14b81f3e523 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -16,7 +16,6 @@
* Based strongly on code by:
*
* Author: Yong-iL Joh <tolkien@mizi.com>
- * Date : $Date: 2002/06/18 12:37:30 $
*
* Author: Andrew Christian
* 15 May 2002
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index b050f4d7b41..07bee4a0d45 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -1,7 +1,7 @@
/*
* include/linux/mmc/sdio_func.h
*
- * Copyright 2007 Pierre Ossman
+ * Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,6 +46,8 @@ struct sdio_func {
unsigned max_blksize; /* maximum block size */
unsigned cur_blksize; /* current block size */
+ unsigned enable_timeout; /* max enable timeout in msec */
+
unsigned int state; /* function state */
#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
@@ -120,23 +122,22 @@ extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz);
extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler);
extern int sdio_release_irq(struct sdio_func *func);
-extern unsigned char sdio_readb(struct sdio_func *func,
- unsigned int addr, int *err_ret);
-extern unsigned short sdio_readw(struct sdio_func *func,
- unsigned int addr, int *err_ret);
-extern unsigned long sdio_readl(struct sdio_func *func,
- unsigned int addr, int *err_ret);
+extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
+
+extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
unsigned int addr, int count);
extern int sdio_readsb(struct sdio_func *func, void *dst,
unsigned int addr, int count);
-extern void sdio_writeb(struct sdio_func *func, unsigned char b,
+extern void sdio_writeb(struct sdio_func *func, u8 b,
unsigned int addr, int *err_ret);
-extern void sdio_writew(struct sdio_func *func, unsigned short b,
+extern void sdio_writew(struct sdio_func *func, u16 b,
unsigned int addr, int *err_ret);
-extern void sdio_writel(struct sdio_func *func, unsigned long b,
+extern void sdio_writel(struct sdio_func *func, u32 b,
unsigned int addr, int *err_ret);
extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
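The byte/word/long accessors now use fixed-width u8/u16/u32 types (sdio_readl used to return unsigned long), and sdio_align_size() lets a driver round buffer sizes to something the host transfers efficiently. Apart from the types, usage is unchanged; a small sketch with a made-up register offset:

#include <linux/mmc/sdio_func.h>

#define MYCARD_REG_CHIP_ID      0x00    /* made-up function register */

static int mycard_read_chip_id(struct sdio_func *func, u8 *id)
{
        int err;

        sdio_claim_host(func);
        *id = sdio_readb(func, MYCARD_REG_CHIP_ID, &err);
        sdio_release_host(func);

        return err;
}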
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
new file mode 100644
index 00000000000..61d19e1b7a0
--- /dev/null
+++ b/include/linux/mmiotrace.h
@@ -0,0 +1,85 @@
+#ifndef MMIOTRACE_H
+#define MMIOTRACE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+struct kmmio_probe;
+struct pt_regs;
+
+typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *,
+ struct pt_regs *, unsigned long addr);
+typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
+ unsigned long condition, struct pt_regs *);
+
+struct kmmio_probe {
+ struct list_head list; /* kmmio internal list */
+ unsigned long addr; /* start location of the probe point */
+ unsigned long len; /* length of the probe region */
+ kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
+ kmmio_post_handler_t post_handler; /* Called after addr is executed */
+ void *private;
+};
+
+/* Is kmmio active, i.e. are any kmmio_probes registered? */
+static inline int is_kmmio_active(void)
+{
+ extern unsigned int kmmio_count;
+ return kmmio_count;
+}
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+
+/* Called from page fault handler. */
+extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+
+/* Called from ioremap.c */
+#ifdef CONFIG_MMIOTRACE
+extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ void __iomem *addr);
+extern void mmiotrace_iounmap(volatile void __iomem *addr);
+#else
+static inline void mmiotrace_ioremap(resource_size_t offset,
+ unsigned long size, void __iomem *addr)
+{
+}
+
+static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+{
+}
+#endif /* CONFIG_MMIOTRACE */
+
+enum mm_io_opcode {
+ MMIO_READ = 0x1, /* struct mmiotrace_rw */
+ MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
+ MMIO_PROBE = 0x3, /* struct mmiotrace_map */
+ MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
+ MMIO_MARKER = 0x5, /* raw char data */
+ MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
+};
+
+struct mmiotrace_rw {
+ resource_size_t phys; /* PCI address of register */
+ unsigned long value;
+ unsigned long pc; /* optional program counter */
+ int map_id;
+ unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+ unsigned char width; /* size of register access in bytes */
+};
+
+struct mmiotrace_map {
+ resource_size_t phys; /* base address in PCI space */
+ unsigned long virt; /* base virtual address */
+ unsigned long len; /* mapping size */
+ int map_id;
+ unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
+};
+
+/* in kernel/trace/trace_mmiotrace.c */
+extern void enable_mmiotrace(void);
+extern void disable_mmiotrace(void);
+extern void mmio_trace_rw(struct mmiotrace_rw *rw);
+extern void mmio_trace_mapping(struct mmiotrace_map *map);
+
+#endif /* MMIOTRACE_H */
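kmmio probes are how mmiotrace hooks ioremapped ranges: the covered pages are armed so the first access faults, the pre handler runs before the faulting instruction and the post handler after it has been single-stepped. A sketch of registering a probe by hand, with the traced virtual address supplied by a hypothetical caller; mmiotrace normally does this itself from the ioremap hooks above:

#include <linux/mmiotrace.h>
#include <linux/mm.h>

static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
                   unsigned long addr)
{
        /* Runs just before the faulting MMIO access at 'addr'. */
}

static void my_post(struct kmmio_probe *p, unsigned long condition,
                    struct pt_regs *regs)
{
        /* Runs after the access has been single-stepped. */
}

static struct kmmio_probe my_probe = {
        .len          = PAGE_SIZE,
        .pre_handler  = my_pre,
        .post_handler = my_post,
};

/* 'mmio_virt' would be the ioremapped address the caller wants traced. */
static int my_trace_region(unsigned long mmio_virt)
{
        my_probe.addr = mmio_virt;
        return register_kmmio_probe(&my_probe);
}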
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 69b2342d5eb..c4db5827963 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -159,6 +159,15 @@ struct ap_device_id {
#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
+/* s390 css bus devices (subchannels) */
+struct css_device_id {
+ __u8 match_flags;
+ __u8 type; /* subchannel type */
+ __u16 pad2;
+ __u32 pad3;
+ kernel_ulong_t driver_data;
+};
+
#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */
/* to workaround crosscompile issues */
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 068a0c9946a..5c42821da2d 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -11,11 +11,21 @@
*/
#ifdef CONFIG_BLOCK
+struct mpage_data {
+ struct bio *bio;
+ sector_t last_block_in_bio;
+ get_block_t *get_block;
+ unsigned use_writepage;
+};
+
struct writeback_control;
+struct bio *mpage_bio_submit(int rw, struct bio *bio);
int mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages, get_block_t get_block);
int mpage_readpage(struct page *page, get_block_t get_block);
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+ void *data);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
int mpage_writepage(struct page *page, get_block_t *get_block,
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index de4decfa1bf..07112ee9293 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -144,11 +144,37 @@ static inline int ip_mroute_opt(int opt)
}
#endif
+#ifdef CONFIG_IP_MROUTE
extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int);
extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
-extern void ip_mr_init(void);
+extern int ip_mr_init(void);
+#else
+static inline
+int ip_mroute_setsockopt(struct sock *sock,
+ int optname, char __user *optval, int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip_mroute_getsockopt(struct sock *sock,
+ int optname, char __user *optval, int __user *optlen)
+{
+ return -ENOPROTOOPT;
+}
+static inline
+int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static inline int ip_mr_init(void)
+{
+ return 0;
+}
+#endif
struct vif_device
{
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index e7989593142..5cf50473a10 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -131,11 +131,44 @@ static inline int ip6_mroute_opt(int opt)
struct sock;
+#ifdef CONFIG_IPV6_MROUTE
extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int);
extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
-extern void ip6_mr_init(void);
+extern int ip6_mr_init(void);
+extern void ip6_mr_cleanup(void);
+#else
+static inline
+int ip6_mroute_setsockopt(struct sock *sock,
+ int optname, char __user *optval, int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip6_mroute_getsockopt(struct sock *sock,
+ int optname, char __user *optval, int __user *optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static inline int ip6_mr_init(void)
+{
+ return 0;
+}
+
+static inline void ip6_mr_cleanup(void)
+{
+ return;
+}
+#endif
struct mif_device
{
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index a15cdd4a8e5..12078577aef 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,30 +17,59 @@
struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
- unsigned int t_clk;
+ unsigned int t_clk;
};
struct mv643xx_eth_platform_data {
+ /*
+ * Pointer back to our parent instance, and our port number.
+ */
struct platform_device *shared;
- int port_number;
+ int port_number;
+ /*
+ * Whether a PHY is present, and if yes, at which address.
+ */
struct platform_device *shared_smi;
+ int force_phy_addr;
+ int phy_addr;
- u16 force_phy_addr; /* force override if phy_addr == 0 */
- u16 phy_addr;
-
- /* If speed is 0, then speed and duplex are autonegotiated. */
- int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */
- int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
-
- /* non-zero values of the following fields override defaults */
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 tx_sram_addr;
- u32 tx_sram_size;
- u32 rx_sram_addr;
- u32 rx_sram_size;
- u8 mac_addr[6]; /* mac address if non-zero*/
+ /*
+ * Use this MAC address if it is valid, overriding the
+ * address that is already in the hardware.
+ */
+ u8 mac_addr[6];
+
+ /*
+ * If speed is 0, autonegotiation is enabled.
+ * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
+ * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
+ */
+ int speed;
+ int duplex;
+
+ /*
+ * Which RX/TX queues to use.
+ */
+ int rx_queue_mask;
+ int tx_queue_mask;
+
+ /*
+ * Override default RX/TX queue sizes if nonzero.
+ */
+ int rx_queue_size;
+ int tx_queue_size;
+
+ /*
+ * Use on-chip SRAM for RX/TX descriptors if size is nonzero
+ * and sufficient to contain all descriptors for the requested
+ * ring sizes.
+ */
+ unsigned long rx_sram_addr;
+ int rx_sram_size;
+ unsigned long tx_sram_addr;
+ int tx_sram_size;
};
-#endif /* __LINUX_MV643XX_ETH_H */
+
+#endif
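The platform data is reorganized into commented sections (parent device and port, PHY, MAC address, link parameters, queue selection and sizing, SRAM). A sketch of how board code might fill it under the new layout; the shared device, PHY address, MAC bytes and ring sizes are placeholders:

#include <linux/mv643xx_eth.h>
#include <linux/platform_device.h>

static struct platform_device myboard_eth_shared;       /* registered elsewhere */

static struct mv643xx_eth_platform_data myboard_eth_pdata = {
        .shared         = &myboard_eth_shared,
        .port_number    = 0,

        .force_phy_addr = 1,
        .phy_addr       = 8,                    /* placeholder PHY address */

        .mac_addr       = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 },

        .speed          = 0,                    /* 0 = autonegotiate */

        .rx_queue_size  = 400,
        .tx_queue_size  = 800,
};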
diff --git a/include/linux/net.h b/include/linux/net.h
index 71f7dd55928..150a48c68d5 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -106,23 +106,23 @@ enum sock_shutdown_cmd {
/**
* struct socket - general BSD socket
* @state: socket state (%SS_CONNECTED, etc)
+ * @type: socket type (%SOCK_STREAM, etc)
* @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
* @ops: protocol specific socket operations
* @fasync_list: Asynchronous wake up list
* @file: File back pointer for gc
* @sk: internal networking protocol agnostic socket representation
* @wait: wait queue for several uses
- * @type: socket type (%SOCK_STREAM, etc)
*/
struct socket {
socket_state state;
+ short type;
unsigned long flags;
const struct proto_ops *ops;
struct fasync_struct *fasync_list;
struct file *file;
struct sock *sk;
wait_queue_head_t wait;
- short type;
};
struct vm_area_struct;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 25f87102ab6..812bcd8b436 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -118,14 +118,6 @@ struct wireless_dev;
#endif /* __KERNEL__ */
-struct net_device_subqueue
-{
- /* Give a control state for each queue. This struct may contain
- * per-queue locks in the future.
- */
- unsigned long state;
-};
-
/*
* Network device statistics. Akin to the 2.0 ether stats but
* with byte counters.
@@ -281,14 +273,11 @@ struct header_ops {
enum netdev_state_t
{
- __LINK_STATE_XOFF=0,
__LINK_STATE_START,
__LINK_STATE_PRESENT,
- __LINK_STATE_SCHED,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
- __LINK_STATE_QDISC_RUNNING,
};
@@ -448,6 +437,20 @@ static inline void napi_synchronize(const struct napi_struct *n)
# define napi_synchronize(n) barrier()
#endif
+enum netdev_queue_state_t
+{
+ __QUEUE_STATE_XOFF,
+};
+
+struct netdev_queue {
+ struct net_device *dev;
+ struct Qdisc *qdisc;
+ unsigned long state;
+ spinlock_t _xmit_lock;
+ int xmit_lock_owner;
+ struct Qdisc *qdisc_sleeping;
+} ____cacheline_aligned_in_smp;
+
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
@@ -516,7 +519,6 @@ struct net_device
#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
-#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
#define NETIF_F_LRO 32768 /* large receive offload */
/* Segmentation offload features */
@@ -537,8 +539,6 @@ struct net_device
#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
- struct net_device *next_sched;
-
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
@@ -594,13 +594,14 @@ struct net_device
unsigned char addr_len; /* hardware address length */
unsigned short dev_id; /* for shared network cards */
+ spinlock_t addr_list_lock;
struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
int uc_count; /* Number of installed ucasts */
int uc_promisc;
struct dev_addr_list *mc_list; /* Multicast mac addresses */
int mc_count; /* Number of installed mcasts */
- int promiscuity;
- int allmulti;
+ unsigned int promiscuity;
+ unsigned int allmulti;
/* Protocol specific pointers */
@@ -624,32 +625,21 @@ struct net_device
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
- /* ingress path synchronizer */
- spinlock_t ingress_lock;
- struct Qdisc *qdisc_ingress;
+ struct netdev_queue rx_queue;
-/*
- * Cache line mostly used on queue transmit path (qdisc)
- */
- /* device queue lock */
- spinlock_t queue_lock ____cacheline_aligned_in_smp;
- struct Qdisc *qdisc;
- struct Qdisc *qdisc_sleeping;
- struct list_head qdisc_list;
- unsigned long tx_queue_len; /* Max frames per queue allowed */
+ struct netdev_queue *_tx ____cacheline_aligned_in_smp;
- /* Partially transmitted GSO packet. */
- struct sk_buff *gso_skb;
+ /* Number of TX queues allocated at alloc_netdev_mq() time */
+ unsigned int num_tx_queues;
+
+ /* Number of TX queues currently active in device */
+ unsigned int real_num_tx_queues;
+
+ unsigned long tx_queue_len; /* Max frames per queue allowed */
/*
* One part is mostly used on xmit path (device)
*/
- /* hard_start_xmit synchronizer */
- spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
- /* cpu id of processor entered to hard_start_xmit or -1,
- if nobody entered there.
- */
- int xmit_lock_owner;
void *priv; /* pointer to private data */
int (*hard_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
@@ -728,6 +718,9 @@ struct net_device
void (*poll_controller)(struct net_device *dev);
#endif
+ u16 (*select_queue)(struct net_device *dev,
+ struct sk_buff *skb);
+
#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
struct net *nd_net;
@@ -740,6 +733,8 @@ struct net_device
struct net_bridge_port *br_port;
/* macvlan */
struct macvlan_port *macvlan_port;
+ /* GARP */
+ struct garp_port *garp_port;
/* class/net/name entry */
struct device dev;
@@ -755,16 +750,31 @@ struct net_device
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
-
- /* The TX queue control structures */
- unsigned int egress_subqueue_count;
- struct net_device_subqueue egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define NETDEV_ALIGN 32
#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+ unsigned int index)
+{
+ return &dev->_tx[index];
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+ void (*f)(struct net_device *,
+ struct netdev_queue *,
+ void *),
+ void *arg)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ f(dev, &dev->_tx[i], arg);
+}
+
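A sketch of a hypothetical multiqueue driver allocating its TX queues and walking them with the helpers above; alloc_netdev_mq() and ether_setup() are assumed to be available elsewhere in this header, and example_priv is a stand-in private structure:

struct example_priv { unsigned long flags; };	/* hypothetical private data */

static void example_stop_one_txq(struct net_device *dev,
				 struct netdev_queue *txq, void *unused)
{
	netif_tx_stop_queue(txq);
}

static struct net_device *example_alloc(void)
{
	struct net_device *dev;

	/* Ask for four TX queues up front; they back dev->_tx. */
	dev = alloc_netdev_mq(sizeof(struct example_priv), "example%d",
			      ether_setup, 4);
	if (!dev)
		return NULL;

	/* Keep every queue stopped until the hardware is programmed. */
	netdev_for_each_tx_queue(dev, example_stop_one_txq, NULL);
	return dev;
}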
/*
* Net namespace inlines
*/
@@ -795,7 +805,9 @@ void dev_net_set(struct net_device *dev, struct net *net)
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return dev->priv;
+ return (char *)dev + ((sizeof(struct net_device)
+ + NETDEV_ALIGN_CONST)
+ & ~NETDEV_ALIGN_CONST);
}
/* Set the sysfs physical device reference for the network logical device
@@ -830,6 +842,19 @@ static inline void netif_napi_add(struct net_device *dev,
set_bit(NAPI_STATE_SCHED, &napi->state);
}
+/**
+ * netif_napi_del - remove a napi context
+ * @napi: napi context
+ *
+ * netif_napi_del() removes a napi context from the network device napi list
+ */
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+#ifdef CONFIG_NETPOLL
+ list_del(&napi->dev_list);
+#endif
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
@@ -890,6 +915,7 @@ extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
+extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice(struct net_device *dev);
@@ -939,7 +965,7 @@ static inline int unregister_gifconf(unsigned int family)
*/
struct softnet_data
{
- struct net_device *output_queue;
+ struct Qdisc *output_queue;
struct sk_buff_head input_pkt_queue;
struct list_head poll_list;
struct sk_buff *completion_queue;
@@ -954,12 +980,20 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct Qdisc *q);
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_schedule_queue(struct netdev_queue *txq)
{
- if (!test_bit(__LINK_STATE_XOFF, &dev->state))
- __netif_schedule(dev);
+ if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+ __netif_schedule(txq->qdisc);
+}
+
+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
/**
@@ -968,9 +1002,24 @@ static inline void netif_schedule(struct net_device *dev)
*
* Allow upper layers to call the device hard_start_xmit routine.
*/
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+ clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
static inline void netif_start_queue(struct net_device *dev)
{
- clear_bit(__LINK_STATE_XOFF, &dev->state);
+ netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_start_queue(txq);
+ }
}
/**
@@ -980,16 +1029,31 @@ static inline void netif_start_queue(struct net_device *dev)
* Allow upper layers to call the device hard_start_xmit routine.
* Used for flow control when transmit resources are available.
*/
-static inline void netif_wake_queue(struct net_device *dev)
+static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap()) {
- clear_bit(__LINK_STATE_XOFF, &dev->state);
+ clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
return;
}
#endif
- if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
- __netif_schedule(dev);
+ if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+ __netif_schedule(dev_queue->qdisc);
+}
+
+static inline void netif_wake_queue(struct net_device *dev)
+{
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_wake_queue(txq);
+ }
}
/**
@@ -999,9 +1063,24 @@ static inline void netif_wake_queue(struct net_device *dev)
* Stop upper layers calling the device hard_start_xmit routine.
* Used for flow control when transmit resources are unavailable.
*/
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+ set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
static inline void netif_stop_queue(struct net_device *dev)
{
- set_bit(__LINK_STATE_XOFF, &dev->state);
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_stop_queue(txq);
+ }
}
/**
@@ -1010,9 +1089,14 @@ static inline void netif_stop_queue(struct net_device *dev)
*
* Test if transmit queue on device is currently unable to send.
*/
+static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+ return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+}
+
static inline int netif_queue_stopped(const struct net_device *dev)
{
- return test_bit(__LINK_STATE_XOFF, &dev->state);
+ return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
/**
@@ -1042,9 +1126,8 @@ static inline int netif_running(const struct net_device *dev)
*/
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+ clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}
/**
@@ -1056,13 +1139,12 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
*/
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap())
return;
#endif
- set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
-#endif
+ set_bit(__QUEUE_STATE_XOFF, &txq->state);
}
/**
@@ -1075,12 +1157,8 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
static inline int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- return test_bit(__LINK_STATE_XOFF,
- &dev->egress_subqueue[queue_index].state);
-#else
- return 0;
-#endif
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+ return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}
static inline int netif_subqueue_stopped(const struct net_device *dev,
@@ -1098,15 +1176,13 @@ static inline int netif_subqueue_stopped(const struct net_device *dev,
*/
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
if (netpoll_trap())
return;
#endif
- if (test_and_clear_bit(__LINK_STATE_XOFF,
- &dev->egress_subqueue[queue_index].state))
- __netif_schedule(dev);
-#endif
+ if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+ __netif_schedule(txq->qdisc);
}
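A sketch of how a multiqueue driver's transmit and completion paths might pair the per-subqueue stop/wake helpers; the free-descriptor threshold is illustrative only:

/* Called from the hypothetical driver's hard_start_xmit after queueing on ring q. */
static void example_tx_maybe_stop(struct net_device *dev, u16 q, int free_descs)
{
	if (free_descs < MAX_SKB_FRAGS + 1)
		netif_stop_subqueue(dev, q);
}

/* Called from the TX completion handler once descriptors were reclaimed. */
static void example_tx_maybe_wake(struct net_device *dev, u16 q, int free_descs)
{
	if (__netif_subqueue_stopped(dev, q) && free_descs > MAX_SKB_FRAGS + 1)
		netif_wake_subqueue(dev, q);
}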
/**
@@ -1114,15 +1190,10 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
* @dev: network device
*
* Check if device has multiple transmit queues
- * Always falls if NETDEVICE_MULTIQUEUE is not configured
*/
static inline int netif_is_multiqueue(const struct net_device *dev)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
- return (!!(NETIF_F_MULTI_QUEUE & dev->features));
-#else
- return 0;
-#endif
+ return (dev->num_tx_queues > 1);
}
/* Use this variant when it is known for sure that it
@@ -1142,6 +1213,7 @@ extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
+extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
@@ -1154,7 +1226,8 @@ extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
+ struct net_device *dev,
+ struct netdev_queue *txq);
extern int netdev_budget;
@@ -1397,62 +1470,121 @@ static inline void netif_rx_complete(struct net_device *dev,
*
* Get network device transmit lock
*/
-static inline void __netif_tx_lock(struct net_device *dev, int cpu)
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
- spin_lock(&dev->_xmit_lock);
- dev->xmit_lock_owner = cpu;
+ spin_lock(&txq->_xmit_lock);
+ txq->xmit_lock_owner = cpu;
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+ spin_lock_bh(&txq->_xmit_lock);
+ txq->xmit_lock_owner = smp_processor_id();
}
static inline void netif_tx_lock(struct net_device *dev)
{
- __netif_tx_lock(dev, smp_processor_id());
+ int cpu = smp_processor_id();
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ __netif_tx_lock(txq, cpu);
+ }
}
static inline void netif_tx_lock_bh(struct net_device *dev)
{
- spin_lock_bh(&dev->_xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
+ local_bh_disable();
+ netif_tx_lock(dev);
}
-static inline int netif_tx_trylock(struct net_device *dev)
+static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
- int ok = spin_trylock(&dev->_xmit_lock);
+ int ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
- dev->xmit_lock_owner = smp_processor_id();
+ txq->xmit_lock_owner = smp_processor_id();
return ok;
}
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+ return __netif_tx_trylock(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock_bh(&txq->_xmit_lock);
+}
+
static inline void netif_tx_unlock(struct net_device *dev)
{
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->_xmit_lock);
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ __netif_tx_unlock(txq);
+ }
+
}
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
- dev->xmit_lock_owner = -1;
- spin_unlock_bh(&dev->_xmit_lock);
+ netif_tx_unlock(dev);
+ local_bh_enable();
}
-#define HARD_TX_LOCK(dev, cpu) { \
+#define HARD_TX_LOCK(dev, txq, cpu) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- __netif_tx_lock(dev, cpu); \
+ __netif_tx_lock(txq, cpu); \
} \
}
-#define HARD_TX_UNLOCK(dev) { \
+#define HARD_TX_UNLOCK(dev, txq) { \
if ((dev->features & NETIF_F_LLTX) == 0) { \
- netif_tx_unlock(dev); \
+ __netif_tx_unlock(txq); \
} \
}
static inline void netif_tx_disable(struct net_device *dev)
{
+ unsigned int i;
+
netif_tx_lock_bh(dev);
- netif_stop_queue(dev);
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_stop_queue(txq);
+ }
netif_tx_unlock_bh(dev);
}
+static inline void netif_addr_lock(struct net_device *dev)
+{
+ spin_lock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_lock_bh(struct net_device *dev)
+{
+ spin_lock_bh(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock(struct net_device *dev)
+{
+ spin_unlock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock_bh(struct net_device *dev)
+{
+ spin_unlock_bh(&dev->addr_list_lock);
+}
+
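A sketch of the new address-list locking around a multicast walk in a hypothetical set_multicast_list implementation; the dev_addr_list 'next' member is assumed from its declaration elsewhere in this header:

static void example_set_multicast_list(struct net_device *dev)
{
	struct dev_addr_list *mc;
	int n = 0;

	netif_addr_lock_bh(dev);
	for (mc = dev->mc_list; mc; mc = mc->next)
		n++;		/* e.g. program each address into a hardware filter */
	/* dev->mc_count matches n while addr_list_lock is held */
	netif_addr_unlock_bh(dev);
}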
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
@@ -1480,9 +1612,10 @@ extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *ad
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
-extern void dev_set_promiscuity(struct net_device *dev, int inc);
-extern void dev_set_allmulti(struct net_device *dev, int inc);
+extern int dev_set_promiscuity(struct net_device *dev, int inc);
+extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
+extern void netdev_bonding_change(struct net_device *dev);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
@@ -1509,6 +1642,9 @@ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
+extern int netdev_class_create_file(struct class_attribute *class_attr);
+extern void netdev_class_remove_file(struct class_attribute *class_attr);
+
extern void linkwatch_run_queue(void);
extern int netdev_compute_features(unsigned long all, unsigned long one);
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 0a383ac083c..759bc043dc6 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -81,6 +81,7 @@ enum ctattr_protoinfo {
CTA_PROTOINFO_UNSPEC,
CTA_PROTOINFO_TCP,
CTA_PROTOINFO_DCCP,
+ CTA_PROTOINFO_SCTP,
__CTA_PROTOINFO_MAX
};
#define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1)
@@ -103,6 +104,15 @@ enum ctattr_protoinfo_dccp {
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
+enum ctattr_protoinfo_sctp {
+ CTA_PROTOINFO_SCTP_UNSPEC,
+ CTA_PROTOINFO_SCTP_STATE,
+ CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
+ CTA_PROTOINFO_SCTP_VTAG_REPLY,
+ __CTA_PROTOINFO_SCTP_MAX
+};
+#define CTA_PROTOINFO_SCTP_MAX (__CTA_PROTOINFO_SCTP_MAX - 1)
+
enum ctattr_counters {
CTA_COUNTERS_UNSPEC,
CTA_COUNTERS_PACKETS, /* old 64bit counters */
diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h
index bb21dd1aee2..8a6ba7bbef9 100644
--- a/include/linux/netfilter/xt_string.h
+++ b/include/linux/netfilter/xt_string.h
@@ -4,6 +4,11 @@
#define XT_STRING_MAX_PATTERN_SIZE 128
#define XT_STRING_MAX_ALGO_NAME_SIZE 16
+enum {
+ XT_STRING_FLAG_INVERT = 0x01,
+ XT_STRING_FLAG_IGNORECASE = 0x02
+};
+
struct xt_string_info
{
u_int16_t from_offset;
@@ -11,7 +16,15 @@ struct xt_string_info
char algo[XT_STRING_MAX_ALGO_NAME_SIZE];
char pattern[XT_STRING_MAX_PATTERN_SIZE];
u_int8_t patlen;
- u_int8_t invert;
+ union {
+ struct {
+ u_int8_t invert;
+ } v0;
+
+ struct {
+ u_int8_t flags;
+ } v1;
+ } u;
/* Used internally by the kernel */
struct ts_config __attribute__((aligned(8))) *config;
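A hedged user-space style sketch of filling the revision-1 layout of struct xt_string_info; the pattern, offsets and the "bm" textsearch algorithm name are examples, not requirements:

#include <string.h>
#include <linux/netfilter/xt_string.h>

static void example_fill_string_match(struct xt_string_info *info)
{
	memset(info, 0, sizeof(*info));
	info->from_offset = 0;
	info->to_offset   = 65535;
	info->u.v1.flags  = XT_STRING_FLAG_IGNORECASE;	/* revision 1 uses .u.v1 */
	strncpy(info->algo, "bm", sizeof(info->algo));
	info->patlen = 4;
	memcpy(info->pattern, "test", info->patlen);
}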
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
new file mode 100644
index 00000000000..2273c3ae33c
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -0,0 +1,40 @@
+/*
+ * ebt_ip6
+ *
+ * Authors:
+ * Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
+ * Manohar Castelino <manohar.r.castelino@intel.com>
+ *
+ * Jan 11, 2008
+ *
+ */
+
+#ifndef __LINUX_BRIDGE_EBT_IP6_H
+#define __LINUX_BRIDGE_EBT_IP6_H
+
+#define EBT_IP6_SOURCE 0x01
+#define EBT_IP6_DEST 0x02
+#define EBT_IP6_TCLASS 0x04
+#define EBT_IP6_PROTO 0x08
+#define EBT_IP6_SPORT 0x10
+#define EBT_IP6_DPORT 0x20
+#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
+ EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+#define EBT_IP6_MATCH "ip6"
+
+/* the same values are used for the invflags */
+struct ebt_ip6_info
+{
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ struct in6_addr smsk;
+ struct in6_addr dmsk;
+ uint8_t tclass;
+ uint8_t protocol;
+ uint8_t bitmask;
+ uint8_t invflags;
+ uint16_t sport[2];
+ uint16_t dport[2];
+};
+
+#endif
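A hedged sketch of how user space (e.g. an ebtables extension) might fill the new match structure; the protocol and port range are example values:

#include <netinet/in.h>
#include <linux/netfilter_bridge/ebt_ip6.h>

static void example_fill_ip6_match(struct ebt_ip6_info *info)
{
	info->bitmask  = EBT_IP6_PROTO | EBT_IP6_DPORT;
	info->invflags = 0;
	info->protocol = IPPROTO_TCP;
	info->dport[0] = 22;	/* match destination ports 22..80 */
	info->dport[1] = 80;
}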
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index 96e231ae755..b76e653157e 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -4,7 +4,8 @@
#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
#define EBT_LOG_ARP 0x02
#define EBT_LOG_NFLOG 0x04
-#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP)
+#define EBT_LOG_IP6 0x08
+#define EBT_LOG_MASK (EBT_LOG_IP | EBT_LOG_ARP | EBT_LOG_IP6)
#define EBT_LOG_PREFIX_SIZE 30
#define EBT_LOG_WATCHER "log"
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 650318b0c40..29c7727ff0e 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -60,6 +60,7 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_MANGLE = -150,
NF_IP_PRI_NAT_DST = -100,
NF_IP_PRI_FILTER = 0,
+ NF_IP_PRI_SECURITY = 50,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 3475a65dae9..d654873aa25 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -64,11 +64,14 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_MANGLE = -150,
NF_IP6_PRI_NAT_DST = -100,
NF_IP6_PRI_FILTER = 0,
+ NF_IP6_PRI_SECURITY = 50,
NF_IP6_PRI_NAT_SRC = 100,
NF_IP6_PRI_SELINUX_LAST = 225,
NF_IP6_PRI_LAST = INT_MAX,
};
+#ifdef __KERNEL__
+
#ifdef CONFIG_NETFILTER
extern int ip6_route_me_harder(struct sk_buff *skb);
extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
@@ -81,4 +84,6 @@ static inline int ipv6_netfilter_init(void) { return 0; }
static inline void ipv6_netfilter_fini(void) { return; }
#endif /* CONFIG_NETFILTER */
+#endif /* __KERNEL__ */
+
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index bec1062a25a..9ff1b54908f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -193,7 +193,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
-int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 8726491de15..ea036676948 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -65,9 +65,6 @@
#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010
#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020
#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040
-#define NFS4_ACE_OWNER 0x00000080
-#define NFS4_ACE_GROUP 0x00000100
-#define NFS4_ACE_EVERYONE 0x00000200
#define NFS4_ACE_READ_DATA 0x00000001
#define NFS4_ACE_LIST_DIRECTORY 0x00000001
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 27d6a8d98ce..29d26191873 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -12,9 +12,19 @@
#include <linux/magic.h>
/* Default timeout values */
+#define NFS_DEF_UDP_TIMEO (11)
+#define NFS_DEF_UDP_RETRANS (3)
+#define NFS_DEF_TCP_TIMEO (600)
+#define NFS_DEF_TCP_RETRANS (2)
+
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
+#define NFS_DEF_ACREGMIN (3)
+#define NFS_DEF_ACREGMAX (60)
+#define NFS_DEF_ACDIRMIN (30)
+#define NFS_DEF_ACDIRMAX (60)
+
/*
* When flushing a cluster of dirty pages, there can be different
* strategies:
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
new file mode 100644
index 00000000000..1cb9a3fed2b
--- /dev/null
+++ b/include/linux/nfs_iostat.h
@@ -0,0 +1,119 @@
+/*
+ * User-space visible declarations for NFS client per-mount
+ * point statistics
+ *
+ * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
+ *
+ * NFS client per-mount statistics provide information about the
+ * health of the NFS client and the health of each NFS mount point.
+ * Generally these are not for detailed problem diagnosis, but
+ * simply to indicate that there is a problem.
+ *
+ * These counters are not meant to be human-readable, but are meant
+ * to be integrated into system monitoring tools such as "sar" and
+ * "iostat". As such, the counters are sampled by the tools over
+ * time, and are never zeroed after a file system is mounted.
+ * Moving averages can be computed by the tools by taking the
+ * difference between two instantaneous samples and dividing that
+ * by the time between the samples.
+ */
+
+#ifndef _LINUX_NFS_IOSTAT
+#define _LINUX_NFS_IOSTAT
+
+#define NFS_IOSTAT_VERS "1.0"
+
+/*
+ * NFS byte counters
+ *
+ * 1. SERVER - the number of payload bytes read from or written
+ * to the server by the NFS client via an NFS READ or WRITE
+ * request.
+ *
+ * 2. NORMAL - the number of bytes read or written by applications
+ * via the read(2) and write(2) system call interfaces.
+ *
+ * 3. DIRECT - the number of bytes read or written from files
+ * opened with the O_DIRECT flag.
+ *
+ * These counters give a view of the data throughput into and out
+ * of the NFS client. Comparing the number of bytes requested by
+ * an application with the number of bytes the client requests from
+ * the server can provide an indication of client efficiency
+ * (per-op, cache hits, etc).
+ *
+ * These counters can also help characterize which access methods
+ * are in use. DIRECT by itself shows whether there is any O_DIRECT
+ * traffic. NORMAL + DIRECT shows how much data is going through
+ * the system call interface. A large amount of SERVER traffic
+ * without much NORMAL or DIRECT traffic shows that applications
+ * are using mapped files.
+ *
+ * NFS page counters
+ *
+ * These count the number of pages read or written via nfs_readpage(),
+ * nfs_readpages(), or their write equivalents.
+ *
+ * NB: When adding new byte counters, please include the measured
+ * units in the name of each byte counter to help users of this
+ * interface determine what exactly is being counted.
+ */
+enum nfs_stat_bytecounters {
+ NFSIOS_NORMALREADBYTES = 0,
+ NFSIOS_NORMALWRITTENBYTES,
+ NFSIOS_DIRECTREADBYTES,
+ NFSIOS_DIRECTWRITTENBYTES,
+ NFSIOS_SERVERREADBYTES,
+ NFSIOS_SERVERWRITTENBYTES,
+ NFSIOS_READPAGES,
+ NFSIOS_WRITEPAGES,
+ __NFSIOS_BYTESMAX,
+};
+
+/*
+ * NFS event counters
+ *
+ * These counters provide a low-overhead way of monitoring client
+ * activity without enabling NFS trace debugging. The counters
+ * show the rate at which VFS requests are made, and how often the
+ * client invalidates its data and attribute caches. This allows
+ * system administrators to monitor such things as how close-to-open
+ * is working, and answer questions such as "why are there so many
+ * GETATTR requests on the wire?"
+ *
+ * They also count anomalous events such as short reads and writes,
+ * silly renames due to close-after-delete, and operations that
+ * change the size of a file (such operations can often be the
+ * source of data corruption if applications aren't using file
+ * locking properly).
+ */
+enum nfs_stat_eventcounters {
+ NFSIOS_INODEREVALIDATE = 0,
+ NFSIOS_DENTRYREVALIDATE,
+ NFSIOS_DATAINVALIDATE,
+ NFSIOS_ATTRINVALIDATE,
+ NFSIOS_VFSOPEN,
+ NFSIOS_VFSLOOKUP,
+ NFSIOS_VFSACCESS,
+ NFSIOS_VFSUPDATEPAGE,
+ NFSIOS_VFSREADPAGE,
+ NFSIOS_VFSREADPAGES,
+ NFSIOS_VFSWRITEPAGE,
+ NFSIOS_VFSWRITEPAGES,
+ NFSIOS_VFSGETDENTS,
+ NFSIOS_VFSSETATTR,
+ NFSIOS_VFSFLUSH,
+ NFSIOS_VFSFSYNC,
+ NFSIOS_VFSLOCK,
+ NFSIOS_VFSRELEASE,
+ NFSIOS_CONGESTIONWAIT,
+ NFSIOS_SETATTRTRUNC,
+ NFSIOS_EXTENDWRITE,
+ NFSIOS_SILLYRENAME,
+ NFSIOS_SHORTREAD,
+ NFSIOS_SHORTWRITE,
+ NFSIOS_DELAY,
+ __NFSIOS_COUNTSMAX,
+};
+
+#endif /* _LINUX_NFS_IOSTAT */
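A small user-space style sketch of the moving-average computation described in the comment above; the sampling mechanism is left out and only the arithmetic is shown:

/* Rate in bytes per second between two samples of one byte counter. */
static double example_nfs_rate(unsigned long long bytes_then,
			       unsigned long long bytes_now,
			       double seconds_between_samples)
{
	/* Counters are never zeroed after mount, so the difference is meaningful. */
	return (double)(bytes_now - bytes_then) / seconds_between_samples;
}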
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index a1676e19e49..3c60685d972 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,9 +27,12 @@
/*
* Valid flags for a dirty buffer
*/
-#define PG_BUSY 0
-#define PG_NEED_COMMIT 1
-#define PG_NEED_RESCHED 2
+enum {
+ PG_BUSY = 0,
+ PG_CLEAN,
+ PG_NEED_COMMIT,
+ PG_NEED_RESCHED,
+};
struct nfs_inode;
struct nfs_page {
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 24263bb8e0b..8c77c11224d 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -829,9 +829,8 @@ struct nfs_rpc_ops {
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
- int (*file_open) (struct inode *, struct file *);
- int (*file_release) (struct inode *, struct file *);
int (*lock)(struct file *, int, struct file_lock *);
+ int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
};
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 41d30c9c9de..a2861d95ecc 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -28,20 +28,20 @@
#define NFSD_SUPPORTED_MINOR_VERSION 0
/*
- * Special flags for nfsd_permission. These must be different from MAY_READ,
- * MAY_WRITE, and MAY_EXEC.
+ * Flags for nfsd_permission
*/
-#define MAY_NOP 0
-#define MAY_SATTR 8
-#define MAY_TRUNC 16
-#define MAY_LOCK 32
-#define MAY_OWNER_OVERRIDE 64
-#define MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
-#if (MAY_SATTR | MAY_TRUNC | MAY_LOCK | MAY_OWNER_OVERRIDE | MAY_LOCAL_ACCESS) & (MAY_READ | MAY_WRITE | MAY_EXEC)
-# error "please use a different value for MAY_SATTR or MAY_TRUNC or MAY_LOCK or MAY_LOCAL_ACCESS or MAY_OWNER_OVERRIDE."
-#endif
-#define MAY_CREATE (MAY_EXEC|MAY_WRITE)
-#define MAY_REMOVE (MAY_EXEC|MAY_WRITE|MAY_TRUNC)
+#define NFSD_MAY_NOP 0
+#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
+#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
+#define NFSD_MAY_READ 4 /* == MAY_READ */
+#define NFSD_MAY_SATTR 8
+#define NFSD_MAY_TRUNC 16
+#define NFSD_MAY_LOCK 32
+#define NFSD_MAY_OWNER_OVERRIDE 64
+#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file */
+
+#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
+#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
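The comments above say the low three values line up with MAY_EXEC, MAY_WRITE and MAY_READ from <linux/fs.h>; a compile-time sketch of checking that assumption:

#include <linux/fs.h>
#include <linux/kernel.h>

static inline void nfsd_may_flags_check(void)	/* illustrative only */
{
	BUILD_BUG_ON(NFSD_MAY_EXEC  != MAY_EXEC);
	BUILD_BUG_ON(NFSD_MAY_WRITE != MAY_WRITE);
	BUILD_BUG_ON(NFSD_MAY_READ  != MAY_READ);
}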
/*
* Callback function for readdir
@@ -54,6 +54,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
extern struct svc_program nfsd_program;
extern struct svc_version nfsd_version2, nfsd_version3,
nfsd_version4;
+extern struct mutex nfsd_mutex;
extern struct svc_serv *nfsd_serv;
extern struct seq_operations nfs_exports_op;
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index db348f74937..d0fe2e37845 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -98,8 +98,6 @@ struct nfs4_callback {
u32 cb_ident;
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
- struct rpc_program cb_program;
- struct rpc_stat cb_stat;
struct rpc_clnt * cb_client;
};
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index ea6517e58b0..2be7c63bc0f 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -122,13 +122,13 @@ enum nl80211_commands {
NL80211_CMD_NEW_STATION,
NL80211_CMD_DEL_STATION,
- /* add commands here */
-
NL80211_CMD_GET_MPATH,
NL80211_CMD_SET_MPATH,
NL80211_CMD_NEW_MPATH,
NL80211_CMD_DEL_MPATH,
+ /* add commands here */
+
/* used to define NL80211_CMD_MAX below */
__NL80211_CMD_AFTER_LAST,
NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
@@ -230,18 +230,21 @@ enum nl80211_attrs {
NL80211_ATTR_MNTR_FLAGS,
- /* add attributes here, update the policy in nl80211.c */
-
NL80211_ATTR_MESH_ID,
NL80211_ATTR_STA_PLINK_ACTION,
NL80211_ATTR_MPATH_NEXT_HOP,
NL80211_ATTR_MPATH_INFO,
+ /* add attributes here, update the policy in nl80211.c */
+
__NL80211_ATTR_AFTER_LAST,
NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
};
-#define NL80211_MAX_SUPP_RATES 32
+#define NL80211_MAX_SUPP_RATES 32
+#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
+#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
+#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
/**
* enum nl80211_iftype - (virtual) interface types
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0ff6224d172..bd3d72ddf33 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -197,6 +197,7 @@ static inline int notifier_to_errno(int ret)
#define NETDEV_GOING_DOWN 0x0009
#define NETDEV_CHANGENAME 0x000A
#define NETDEV_FEAT_CHANGE 0x000B
+#define NETDEV_BONDING_FAILOVER 0x000C
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index afe338217d9..d3a74e00a3e 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -24,4 +24,7 @@ static inline void of_device_free(struct of_device *dev)
of_release_dev(&dev->dev);
}
+extern ssize_t of_device_get_modalias(struct of_device *ofdev,
+ char *str, ssize_t len);
+
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d18b1dd49fa..a6a088e1a80 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -17,8 +17,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-/* Include the pci register defines */
-#include <linux/pci_regs.h>
+#include <linux/pci_regs.h> /* The pci register defines */
/*
* The PCI interface treats multi-function devices as independent
@@ -49,12 +48,22 @@
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
+#include <linux/kobject.h>
#include <asm/atomic.h>
#include <linux/device.h>
/* Include the ID list */
#include <linux/pci_ids.h>
+/* pci_slot represents a physical slot */
+struct pci_slot {
+ struct pci_bus *bus; /* The bus this slot is on */
+ struct list_head list; /* node in list of slots on this bus */
+ struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
+ unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ struct kobject kobj;
+};
+
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
pci_mmap_io,
@@ -142,6 +151,7 @@ struct pci_dev {
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+ struct pci_slot *slot; /* Physical slot this device is in */
unsigned int devfn; /* encoded device & function index */
unsigned short vendor;
@@ -167,6 +177,13 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional,
and D3 being off. */
+ int pm_cap; /* PM capability offset in the
+ configuration space */
+ unsigned int pme_support:5; /* Bitmask of states from which PME#
+ can be generated */
+ unsigned int d1_support:1; /* Low power state D1 is supported */
+ unsigned int d2_support:1; /* Low power state D2 is supported */
+ unsigned int no_d1d2:1; /* Only allow D0 and D3 */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -191,7 +208,6 @@ struct pci_dev {
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
- unsigned int no_d1d2:1; /* only allow d0 or d3 */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1;
@@ -267,6 +283,7 @@ struct pci_bus {
struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */
+ struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BUS_NUM_RESOURCES];
/* address space routed to this bus */
@@ -328,7 +345,7 @@ struct pci_bus_region {
struct pci_dynids {
spinlock_t lock; /* protects list, index */
struct list_head list; /* for IDs added at runtime */
- unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
+ unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
};
/* ---------------------------------------------------------------- */
@@ -390,7 +407,7 @@ struct pci_driver {
int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev);
-
+ struct pm_ext_ops *pm;
struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
@@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ const char *name);
+void pci_destroy_slot(struct pci_slot *slot);
+void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_prepare_to_sleep(struct pci_dev *dev);
+int pci_back_from_sleep(struct pci_dev *dev);
/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
@@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
}
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ return -EIO;
+}
+
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{
@@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device.
*/
-static inline char *pci_name(struct pci_dev *pdev)
+static inline const char *pci_name(struct pci_dev *pdev)
{
- return pdev->dev.bus_id;
+ return dev_name(&pdev->dev);
}
@@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
pci_fixup_header, /* After reading configuration header */
pci_fixup_final, /* Final phase of device fixups */
pci_fixup_enable, /* pci_enable_device() time */
- pci_fixup_resume, /* pci_enable_device() time */
+ pci_fixup_resume, /* pci_device_resume() */
+ pci_fixup_suspend, /* pci_device_suspend() */
+ pci_fixup_resume_early, /* pci_device_resume_early() */
};
/* Anonymous variables would be nice... */
@@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
resume##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
+ resume_early##vendor##device##hook, vendor, device, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
+ suspend##vendor##device##hook, vendor, device, hook)
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern int pcibios_add_platform_entries(struct pci_dev *dev);
+int pcibios_add_platform_entries(struct pci_dev *dev);
+void pcibios_disable_device(struct pci_dev *dev);
+int pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state);
#ifdef CONFIG_PCI_MMCONFIG
extern void __init pci_mmcfg_early_init(void);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8f67e8f2a3c..a08cd06b541 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
* @get_adapter_status: Called to see if an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
- * @get_address: Called to get pci address of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_max_bus_speed: Called to get the max bus speed for a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
@@ -120,7 +117,6 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*get_address) (struct hotplug_slot *slot, u32 *value);
int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
};
@@ -140,7 +136,6 @@ struct hotplug_slot_info {
u8 attention_status;
u8 latch_status;
u8 adapter_status;
- u32 address;
enum pci_bus_speed max_bus_speed;
enum pci_bus_speed cur_bus_speed;
};
@@ -166,15 +161,14 @@ struct hotplug_slot {
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
- struct kobject kobj;
+ struct pci_slot *pci_slot;
};
#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
-extern int pci_hp_register (struct hotplug_slot *slot);
-extern int pci_hp_deregister (struct hotplug_slot *slot);
+extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
+extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
-extern struct kset *pci_hotplug_slots_kset;
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
@@ -227,9 +221,9 @@ struct hotplug_params {
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/actypes.h>
-extern acpi_status acpi_run_oshp(acpi_handle handle);
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle);
#endif
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 65953822c9c..d8507eb394c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1950,6 +1950,8 @@
#define PCI_DEVICE_ID_NX2_5708 0x164c
#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
#define PCI_DEVICE_ID_NX2_57710 0x164e
+#define PCI_DEVICE_ID_NX2_57711 0x164f
+#define PCI_DEVICE_ID_NX2_57711E 0x1650
#define PCI_DEVICE_ID_TIGON3_5705 0x1653
#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
#define PCI_DEVICE_ID_TIGON3_5720 0x1658
@@ -1982,6 +1984,7 @@
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696
#define PCI_DEVICE_ID_TIGON3_5784 0x1698
+#define PCI_DEVICE_ID_TIGON3_5785 0x1699
#define PCI_DEVICE_ID_TIGON3_5786 0x169a
#define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c
@@ -2171,6 +2174,8 @@
#define PCI_DEVICE_ID_MPC8544 0x0033
#define PCI_DEVICE_ID_MPC8572E 0x0040
#define PCI_DEVICE_ID_MPC8572 0x0041
+#define PCI_DEVICE_ID_MPC8536E 0x0050
+#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
@@ -2188,6 +2193,7 @@
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
+#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
#define PCI_VENDOR_ID_KORENIX 0x1982
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index c0c1223c919..19958b92990 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -231,6 +231,7 @@
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
+#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 9007ccdfc11..20838883535 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
- s64 ret = __percpu_counter_sum(fbc);
+ s64 ret = __percpu_counter_sum(fbc, 0);
return ret < 0 ? 0 : ret;
}
+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc, 1);
+}
+
+
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
- return __percpu_counter_sum(fbc);
+ return __percpu_counter_sum(fbc, 0);
}
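A sketch contrasting the cheap read, the plain sum and the new sum-and-set variant; the assumption (from the added 'set' argument) is that sum-and-set also folds the per-cpu deltas back into the central count:

static void example_percpu_counter_use(struct percpu_counter *fbc)
{
	s64 approx, exact;

	percpu_counter_add(fbc, 32);

	approx = percpu_counter_read(fbc);		/* cheap, may lag behind */
	exact  = percpu_counter_sum(fbc);		/* walks all CPUs, read-only */
	exact  = percpu_counter_sum_and_set(fbc);	/* presumably also consolidates */
	(void)approx;
}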
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 99efbed81fa..7cf7824df77 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -374,6 +374,7 @@ enum
TCA_FLOW_ACT,
TCA_FLOW_POLICE,
TCA_FLOW_EMATCHES,
+ TCA_FLOW_PERTURB,
__TCA_FLOW_MAX
};
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index dbb7ac37960..e5de421ac7b 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -85,6 +85,26 @@ struct tc_ratespec
#define TC_RTAB_SIZE 1024
+struct tc_sizespec {
+ unsigned char cell_log;
+ unsigned char size_log;
+ short cell_align;
+ int overhead;
+ unsigned int linklayer;
+ unsigned int mpu;
+ unsigned int mtu;
+ unsigned int tsize;
+};
+
+enum {
+ TCA_STAB_UNSPEC,
+ TCA_STAB_BASE,
+ TCA_STAB_DATA,
+ __TCA_STAB_MAX
+};
+
+#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
+
/* FIFO section */
struct tc_fifo_qopt
@@ -103,15 +123,6 @@ struct tc_prio_qopt
__u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
};
-enum
-{
- TCA_PRIO_UNSPEC,
- TCA_PRIO_MQ,
- __TCA_PRIO_MAX
-};
-
-#define TCA_PRIO_MAX (__TCA_PRIO_MAX - 1)
-
/* TBF section */
struct tc_tbf_qopt
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 3261681c82a..95ac21ab3a0 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -53,6 +53,7 @@ struct platform_driver {
int (*suspend_late)(struct platform_device *, pm_message_t state);
int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
+ struct pm_ext_ops *pm;
struct device_driver driver;
};
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 39a7ee859b6..4ad9de94449 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -112,7 +112,9 @@ typedef struct pm_message {
int event;
} pm_message_t;
-/*
+/**
+ * struct pm_ops - device PM callbacks
+ *
* Several driver power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
@@ -120,6 +122,284 @@ typedef struct pm_message {
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
+ * The externally visible transitions are handled with the help of the following
+ * callbacks included in this structure:
+ *
+ * @prepare: Prepare the device for the upcoming transition, but do NOT change
+ * its hardware state. Prevent new children of the device from being
+ * registered after @prepare() returns (the driver's subsystem and
+ * generally the rest of the kernel is supposed to prevent new calls to the
+ * probe method from being made too once @prepare() has succeeded). If
+ * @prepare() detects a situation it cannot handle (e.g. registration of a
+ * child already in progress), it may return -EAGAIN, so that the PM core
+ * can execute it once again (e.g. after the new child has been registered)
+ * to recover from the race condition. This method is executed for all
+ * kinds of suspend transitions and is followed by one of the suspend
+ * callbacks: @suspend(), @freeze(), or @poweroff().
+ * The PM core executes @prepare() for all devices before starting to
+ * execute suspend callbacks for any of them, so drivers may assume all of
+ * the other devices to be present and functional while @prepare() is being
+ * executed. In particular, it is safe to make GFP_KERNEL memory
+ * allocations from within @prepare(). However, drivers may NOT assume
+ * anything about the availability of user space at that time, and it
+ * is not correct to request firmware from within @prepare() (it's too
+ * late to do that). [To work around this limitation, drivers may
+ * register suspend and hibernation notifiers that are executed before the
+ * freezing of tasks.]
+ *
+ * @complete: Undo the changes made by @prepare(). This method is executed for
+ * all kinds of resume transitions, following one of the resume callbacks:
+ * @resume(), @thaw(), @restore(). Also called if the state transition
+ * fails before the driver's suspend callback (@suspend(), @freeze(),
+ * @poweroff()) can be executed (e.g. if the suspend callback fails for one
+ * of the other devices that the PM core has unsuccessfully attempted to
+ * suspend earlier).
+ * The PM core executes @complete() after it has executed the appropriate
+ * resume callback for all devices.
+ *
+ * @suspend: Executed before putting the system into a sleep state in which the
+ * contents of main memory are preserved. Quiesce the device, put it into
+ * a low power state appropriate for the upcoming system state (such as
+ * PCI_D3hot), and enable wakeup events as appropriate.
+ *
+ * @resume: Executed after waking the system up from a sleep state in which the
+ * contents of main memory were preserved. Put the device into the
+ * appropriate state, according to the information saved in memory by the
+ * preceding @suspend(). The driver starts working again, responding to
+ * hardware events and software requests. The hardware may have gone
+ * through a power-off reset, or it may have maintained state from the
+ * previous suspend() which the driver may rely on while resuming. On most
+ * platforms, there are no restrictions on availability of resources like
+ * clocks during @resume().
+ *
+ * @freeze: Hibernation-specific, executed before creating a hibernation image.
+ * Quiesce operations so that a consistent image can be created, but do NOT
+ * otherwise put the device into a low power device state and do NOT emit
+ * system wakeup events. Save in main memory the device settings to be
+ * used by @restore() during the subsequent resume from hibernation or by
+ * the subsequent @thaw(), if the creation of the image or the restoration
+ * of main memory contents from it fails.
+ *
+ * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+ * if the creation of the image fails. Also executed after a failing
+ * attempt to restore the contents of main memory from such an image.
+ * Undo the changes made by the preceding @freeze(), so the device can be
+ * operated in the same way as immediately before the call to @freeze().
+ *
+ * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+ * Quiesce the device, put it into a low power state appropriate for the
+ * upcoming system state (such as PCI_D3hot), and enable wakeup events as
+ * appropriate.
+ *
+ * @restore: Hibernation-specific, executed after restoring the contents of main
+ * memory from a hibernation image. Driver starts working again,
+ * responding to hardware events and software requests. Drivers may NOT
+ * make ANY assumptions about the hardware state right prior to @restore().
+ * On most platforms, there are no restrictions on availability of
+ * resources like clocks during @restore().
+ *
+ * All of the above callbacks, except for @complete(), return error codes.
+ * However, the error codes returned by the resume operations, @resume(),
+ * @thaw(), and @restore(), do not cause the PM core to abort the resume
+ * transition during which they are returned. The error codes returned in
+ * those cases are only printed by the PM core to the system logs for debugging
+ * purposes. Still, it is recommended that drivers only return error codes
+ * from their resume methods in case of an unrecoverable failure (i.e. when the
+ * device being handled refuses to resume and becomes unusable) to allow us to
+ * modify the PM core in the future, so that it can avoid attempting to handle
+ * devices that failed to resume and their children.
+ *
+ * It is allowed to unregister devices while the above callbacks are being
+ * executed. However, it is not allowed to unregister a device from within any
+ * of its own callbacks.
+ */
+
+struct pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
+};
+
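A minimal sketch of a driver filling only the callbacks it needs from the structure above; the function bodies are placeholders:

static int example_dev_suspend(struct device *dev)
{
	/* Quiesce the device and arm wakeups, as described for @suspend. */
	return 0;
}

static int example_dev_resume(struct device *dev)
{
	/* Reprogram the hardware from the state saved at suspend time. */
	return 0;
}

static struct pm_ops example_pm_ops = {
	.suspend = example_dev_suspend,
	.resume  = example_dev_resume,
};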
+/**
+ * struct pm_ext_ops - extended device PM callbacks
+ *
+ * Some devices require certain operations related to suspend and hibernation
+ * to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
+ * is defined, adding callbacks to be executed with interrupts disabled to
+ * 'struct pm_ops'.
+ *
+ * The following callbacks included in 'struct pm_ext_ops' are executed with
+ * the nonboot CPUs switched off and with interrupts disabled on the only
+ * functional CPU. They also are executed with the PM core list of devices
+ * locked, so they must NOT unregister any devices.
+ *
+ * @suspend_noirq: Complete the operations of ->suspend() by carrying out any
+ * actions required for suspending the device that need interrupts to be
+ * disabled
+ *
+ * @resume_noirq: Prepare for the execution of ->resume() by carrying out any
+ * actions required for resuming the device that need interrupts to be
+ * disabled
+ *
+ * @freeze_noirq: Complete the operations of ->freeze() by carrying out any
+ * actions required for freezing the device that need interrupts to be
+ * disabled
+ *
+ * @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
+ * actions required for thawing the device that need interrupts to be
+ * disabled
+ *
+ * @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
+ * actions required for handling the device that need interrupts to be
+ * disabled
+ *
+ * @restore_noirq: Prepare for the execution of ->restore() by carrying out any
+ * actions required for restoring the operations of the device that need
+ * interrupts to be disabled
+ *
+ * All of the above callbacks return error codes, but the error codes returned
+ * by the resume operations, @resume_noirq(), @thaw_noirq(), and
+ * @restore_noirq(), do not cause the PM core to abort the resume transition
+ * during which they are returned. The error codes returned in those cases are
+ * only printed by the PM core to the system logs for debugging purposes.
+ * Still, as stated above, it is recommended that drivers only return error
+ * codes from their resume methods if the device being handled fails to resume
+ * and is not usable any more.
+ */
+
+struct pm_ext_ops {
+ struct pm_ops base;
+ int (*suspend_noirq)(struct device *dev);
+ int (*resume_noirq)(struct device *dev);
+ int (*freeze_noirq)(struct device *dev);
+ int (*thaw_noirq)(struct device *dev);
+ int (*poweroff_noirq)(struct device *dev);
+ int (*restore_noirq)(struct device *dev);
+};
+
+/**
+ * PM_EVENT_ messages
+ *
+ * The following PM_EVENT_ messages are defined for the internal use of the PM
+ * core, in order to provide a mechanism allowing the high-level suspend and
+ * hibernation code to convey the necessary information to the device PM core
+ * code:
+ *
+ * ON No transition.
+ *
+ * FREEZE System is going to hibernate, call ->prepare() and ->freeze()
+ * for all devices.
+ *
+ * SUSPEND System is going to suspend, call ->prepare() and ->suspend()
+ * for all devices.
+ *
+ * HIBERNATE Hibernation image has been saved, call ->prepare() and
+ * ->poweroff() for all devices.
+ *
+ * QUIESCE Contents of main memory are going to be restored from a (loaded)
+ * hibernation image, call ->prepare() and ->freeze() for all
+ * devices.
+ *
+ * RESUME System is resuming, call ->resume() and ->complete() for all
+ * devices.
+ *
+ * THAW Hibernation image has been created, call ->thaw() and
+ * ->complete() for all devices.
+ *
+ * RESTORE Contents of main memory have been restored from a hibernation
+ * image, call ->restore() and ->complete() for all devices.
+ *
+ * RECOVER Creation of a hibernation image or restoration of the main
+ * memory contents from a hibernation image has failed, call
+ * ->thaw() and ->complete() for all devices.
+ */
+
+#define PM_EVENT_ON 0x0000
+#define PM_EVENT_FREEZE 0x0001
+#define PM_EVENT_SUSPEND 0x0002
+#define PM_EVENT_HIBERNATE 0x0004
+#define PM_EVENT_QUIESCE 0x0008
+#define PM_EVENT_RESUME 0x0010
+#define PM_EVENT_THAW 0x0020
+#define PM_EVENT_RESTORE 0x0040
+#define PM_EVENT_RECOVER 0x0080
+
+#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
+
+#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
+#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
+#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
+#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
+#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
+#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
+#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
+#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
+
+/**
+ * Device power management states
+ *
+ * These state labels are used internally by the PM core to indicate the current
+ * status of a device with respect to the PM core operations.
+ *
+ * DPM_ON Device is regarded as operational. Set this way
+ * initially and when ->complete() is about to be called.
+ * Also set when ->prepare() fails.
+ *
+ * DPM_PREPARING Device is going to be prepared for a PM transition. Set
+ * when ->prepare() is about to be called.
+ *
+ * DPM_RESUMING Device is going to be resumed. Set when ->resume(),
+ * ->thaw(), or ->restore() is about to be called.
+ *
+ * DPM_SUSPENDING Device has been prepared for a power transition. Set
+ * when ->prepare() has just succeeded.
+ *
+ * DPM_OFF Device is regarded as inactive. Set immediately after
+ * ->suspend(), ->freeze(), or ->poweroff() has succeeded.
+ * Also set when ->resume_noirq(), ->thaw_noirq(), or
+ * ->restore_noirq() is about to be called.
+ *
+ * DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
+ * ->suspend_noirq(), ->freeze_noirq(), or
+ * ->poweroff_noirq() has succeeded.
+ */
+
+enum dpm_state {
+ DPM_INVALID,
+ DPM_ON,
+ DPM_PREPARING,
+ DPM_RESUMING,
+ DPM_SUSPENDING,
+ DPM_OFF,
+ DPM_OFF_IRQ,
+};
+
+struct dev_pm_info {
+ pm_message_t power_state;
+ unsigned can_wakeup:1;
+ unsigned should_wakeup:1;
+ enum dpm_state status; /* Owned by the PM core */
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+#endif
+};
+
+/*
+ * The PM_EVENT_ messages are also used by drivers implementing the legacy
+ * suspend framework, based on the ->suspend() and ->resume() callbacks common
+ * for suspend and hibernation transitions, according to the rules below.
+ */
+
+/* Necessary, because several drivers use PM_EVENT_PRETHAW */
+#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
+
+/*
* One transition is triggered by resume(), after a suspend() call; the
* message is implicit:
*
@@ -164,35 +444,13 @@ typedef struct pm_message {
* or from system low-power states such as standby or suspend-to-RAM.
*/
-#define PM_EVENT_ON 0
-#define PM_EVENT_FREEZE 1
-#define PM_EVENT_SUSPEND 2
-#define PM_EVENT_HIBERNATE 4
-#define PM_EVENT_PRETHAW 8
-
-#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
-
-#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
-#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
-#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
-#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
-#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
-
-struct dev_pm_info {
- pm_message_t power_state;
- unsigned can_wakeup:1;
- unsigned should_wakeup:1;
- bool sleeping:1; /* Owned by the PM core */
-#ifdef CONFIG_PM_SLEEP
- struct list_head entry;
-#endif
-};
+#ifdef CONFIG_PM_SLEEP
+extern void device_pm_lock(void);
+extern void device_power_up(pm_message_t state);
+extern void device_resume(pm_message_t state);
+extern void device_pm_unlock(void);
extern int device_power_down(pm_message_t state);
-extern void device_power_up(void);
-extern void device_resume(void);
-
-#ifdef CONFIG_PM_SLEEP
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index f0d0b2cb8d2..0aae7776185 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = dev->power.should_wakeup = !!val;
}
+static inline void device_set_wakeup_capable(struct device *dev, int val)
+{
+ dev->power.can_wakeup = !!val;
+}
+
static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)
static inline int device_may_wakeup(struct device *dev)
{
- return dev->power.can_wakeup & dev->power.should_wakeup;
-}
-
-/*
- * Platform hook to activate device wakeup capability, if that's not already
- * handled by enable_irq_wake() etc.
- * Returns zero on success, else negative errno
- */
-extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
-
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
- if (platform_enable_wakeup)
- return (*platform_enable_wakeup)(dev, is_on);
- return 0;
+ return dev->power.can_wakeup && dev->power.should_wakeup;
}
#else /* !CONFIG_PM */
@@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = !!val;
}
+static inline void device_set_wakeup_capable(struct device *dev, int val) { }
+
static inline int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
@@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)
#define device_set_wakeup_enable(dev, val) do {} while (0)
#define device_may_wakeup(dev) 0
-static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
-{
- return 0;
-}
-
#endif /* !CONFIG_PM */
#endif /* _LINUX_PM_WAKEUP_H */
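
A hedged usage sketch of the wakeup helpers above, with invented foo_* names and an assumed wake-capable interrupt line:

static int foo_probe(struct device *dev)
{
	/* device can wake the system; enabled by default, sysfs may change it */
	device_init_wakeup(dev, 1);
	return 0;
}

static int foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);	/* hypothetical private data */

	if (device_may_wakeup(dev))
		enable_irq_wake(priv->wake_irq);	/* arm the wake source */
	return 0;
}
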
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 63b128d512f..1ce54b63085 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -1,6 +1,8 @@
/*
* Linux Plug and Play Support
* Copyright by Adam Belay <ambx1@neo.rr.com>
+ * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*/
#ifndef _LINUX_PNP_H
@@ -15,7 +17,6 @@
struct pnp_protocol;
struct pnp_dev;
-struct pnp_resource_table;
/*
* Resource Management
@@ -24,7 +25,14 @@ struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
static inline int pnp_resource_valid(struct resource *res)
{
- if (res && !(res->flags & IORESOURCE_UNSET))
+ if (res)
+ return 1;
+ return 0;
+}
+
+static inline int pnp_resource_enabled(struct resource *res)
+{
+ if (res && !(res->flags & IORESOURCE_DISABLED))
return 1;
return 0;
}
@@ -40,19 +48,31 @@ static inline resource_size_t pnp_resource_len(struct resource *res)
static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
}
static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->end;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
}
static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IO | IORESOURCE_AUTO;
}
static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
@@ -63,25 +83,41 @@ static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar));
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
}
static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
}
static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
}
static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_MEM | IORESOURCE_AUTO;
}
static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
@@ -92,18 +128,30 @@ static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
unsigned int bar)
{
- return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar));
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
}
static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
}
static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IRQ | IORESOURCE_AUTO;
}
static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
@@ -114,12 +162,20 @@ static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
}
static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
{
- return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags;
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_DMA | IORESOURCE_AUTO;
}
static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
@@ -128,57 +184,6 @@ static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
}
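
With the accessors now returning safe defaults for missing resources, a PnP driver's attach path can stay simple. A rough, hypothetical sketch; the foo_* names are invented:

static int foo_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	resource_size_t base;
	int irq;

	if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
		return -ENODEV;

	base = pnp_port_start(dev, 0);
	irq = pnp_irq(dev, 0);

	return foo_hw_init(base, pnp_port_len(dev, 0), irq);	/* hypothetical */
}
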
-#define PNP_PORT_FLAG_16BITADDR (1<<0)
-#define PNP_PORT_FLAG_FIXED (1<<1)
-
-struct pnp_port {
- unsigned short min; /* min base number */
- unsigned short max; /* max base number */
- unsigned char align; /* align boundary */
- unsigned char size; /* size of range */
- unsigned char flags; /* port flags */
- unsigned char pad; /* pad */
- struct pnp_port *next; /* next port */
-};
-
-#define PNP_IRQ_NR 256
-struct pnp_irq {
- DECLARE_BITMAP(map, PNP_IRQ_NR); /* bitmask for IRQ lines */
- unsigned char flags; /* IRQ flags */
- unsigned char pad; /* pad */
- struct pnp_irq *next; /* next IRQ */
-};
-
-struct pnp_dma {
- unsigned char map; /* bitmask for DMA channels */
- unsigned char flags; /* DMA flags */
- struct pnp_dma *next; /* next port */
-};
-
-struct pnp_mem {
- unsigned int min; /* min base number */
- unsigned int max; /* max base number */
- unsigned int align; /* align boundary */
- unsigned int size; /* size of range */
- unsigned char flags; /* memory flags */
- unsigned char pad; /* pad */
- struct pnp_mem *next; /* next memory resource */
-};
-
-#define PNP_RES_PRIORITY_PREFERRED 0
-#define PNP_RES_PRIORITY_ACCEPTABLE 1
-#define PNP_RES_PRIORITY_FUNCTIONAL 2
-#define PNP_RES_PRIORITY_INVALID 65535
-
-struct pnp_option {
- unsigned short priority; /* priority */
- struct pnp_port *port; /* first port */
- struct pnp_irq *irq; /* first IRQ */
- struct pnp_dma *dma; /* first DMA */
- struct pnp_mem *mem; /* first memory resource */
- struct pnp_option *next; /* used to chain dependent resources */
-};
-
/*
* Device Management
*/
@@ -246,9 +251,9 @@ struct pnp_dev {
int active;
int capabilities;
- struct pnp_option *independent;
- struct pnp_option *dependent;
- struct pnp_resource_table *res;
+ unsigned int num_dependent_sets;
+ struct list_head resources;
+ struct list_head options;
char name[PNP_NAME_LEN]; /* contains a human-readable name */
int flags; /* used by protocols */
@@ -425,6 +430,8 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv);
extern struct list_head pnp_cards;
/* resource management */
+int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base,
+ resource_size_t size);
int pnp_auto_config_dev(struct pnp_dev *dev);
int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
@@ -452,6 +459,9 @@ static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return
static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }
/* resource management */
+static inline int pnp_possible_config(struct pnp_dev *dev, int type,
+ resource_size_t base,
+ resource_size_t size) { return 0; }
static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index e86a7a5cf35..b8d4ddd2273 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -23,8 +23,6 @@
* ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
* OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
* OR MODIFICATIONS.
- *
- * $Id: ppp-comp.h,v 1.6 1997/11/27 06:04:44 paulus Exp $
*/
/*
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index c6b13ff8502..6e8adc77522 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -1,5 +1,3 @@
-/* $Id: ppp_defs.h,v 1.2 1994/09/21 01:31:06 paulus Exp $ */
-
/*
* ppp_defs.h - PPP definitions.
*
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 23f0c54175c..72b1a10a59b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,7 +10,7 @@
#include <linux/linkage.h>
#include <linux/list.h>
-#ifdef CONFIG_DEBUG_PREEMPT
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
@@ -52,6 +52,34 @@ do { \
preempt_check_resched(); \
} while (0)
+/* For debugging and tracer internals only! */
+#define add_preempt_count_notrace(val) \
+ do { preempt_count() += (val); } while (0)
+#define sub_preempt_count_notrace(val) \
+ do { preempt_count() -= (val); } while (0)
+#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
+#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+
+#define preempt_disable_notrace() \
+do { \
+ inc_preempt_count_notrace(); \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched_notrace() \
+do { \
+ barrier(); \
+ dec_preempt_count_notrace(); \
+} while (0)
+
+/* preempt_check_resched is OK to trace */
+#define preempt_enable_notrace() \
+do { \
+ preempt_enable_no_resched_notrace(); \
+ barrier(); \
+ preempt_check_resched(); \
+} while (0)
+
#else
#define preempt_disable() do { } while (0)
@@ -59,6 +87,10 @@ do { \
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
+#define preempt_disable_notrace() do { } while (0)
+#define preempt_enable_no_resched_notrace() do { } while (0)
+#define preempt_enable_notrace() do { } while (0)
+
#endif
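
The point of the _notrace variants is to let tracer code disable preemption without re-entering the very hooks being traced. A minimal hedged sketch; the foo_hits counter is invented:

static DEFINE_PER_CPU(unsigned long, foo_hits);

static void foo_trace_hit(void)
{
	/* untraced: no add_preempt_count() call for the tracer to recurse into */
	preempt_disable_notrace();
	__get_cpu_var(foo_hits)++;
	preempt_enable_notrace();
}
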
#ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index f98501ba557..c6f5f9dd0ce 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -95,8 +95,12 @@ extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void ptrace_untrace(struct task_struct *child);
-extern int ptrace_may_attach(struct task_struct *task);
-extern int __ptrace_may_attach(struct task_struct *task);
+#define PTRACE_MODE_READ 1
+#define PTRACE_MODE_ATTACH 2
+/* Returns 0 on success, -errno on denial. */
+extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
+/* Returns true on success, false on denial. */
+extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
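
A hedged sketch of the intended use: code that merely reads another task's state asks for PTRACE_MODE_READ, while actually attaching asks for PTRACE_MODE_ATTACH. The surrounding function below is invented for the example:

static int foo_show_task(struct seq_file *m, struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		return -EACCES;

	seq_printf(m, "state: %ld\n", task->state);
	return 0;
}
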
static inline int ptrace_reparented(struct task_struct *child)
{
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
new file mode 100644
index 00000000000..3945f803d51
--- /dev/null
+++ b/include/linux/pwm.h
@@ -0,0 +1,31 @@
+#ifndef __LINUX_PWM_H
+#define __LINUX_PWM_H
+
+struct pwm_device;
+
+/*
+ * pwm_request - request a PWM device
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label);
+
+/*
+ * pwm_free - free a PWM device
+ */
+void pwm_free(struct pwm_device *pwm);
+
+/*
+ * pwm_config - change a PWM device configuration
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
+
+/*
+ * pwm_enable - start a PWM output toggling
+ */
+int pwm_enable(struct pwm_device *pwm);
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ */
+void pwm_disable(struct pwm_device *pwm);
+
+#endif /* __LINUX_PWM_H */
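
A hedged consumer sketch of this API; the id, label, timings and error convention below are assumptions, since the header does not spell out whether pwm_request() returns NULL or an ERR_PTR on failure:

static struct pwm_device *foo_pwm;

static int foo_start_fan(void)
{
	foo_pwm = pwm_request(0, "foo-fan");
	if (foo_pwm == NULL)			/* assumed failure convention */
		return -ENODEV;

	/* 50% duty cycle at 50 Hz: 10 ms high out of a 20 ms period */
	pwm_config(foo_pwm, 10000000, 20000000);
	return pwm_enable(foo_pwm);
}

static void foo_stop_fan(void)
{
	pwm_disable(foo_pwm);
	pwm_free(foo_pwm);
}
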
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
new file mode 100644
index 00000000000..7a9754c9677
--- /dev/null
+++ b/include/linux/pwm_backlight.h
@@ -0,0 +1,17 @@
+/*
+ * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c
+ */
+#ifndef __LINUX_PWM_BACKLIGHT_H
+#define __LINUX_PWM_BACKLIGHT_H
+
+struct platform_pwm_backlight_data {
+ int pwm_id;
+ unsigned int max_brightness;
+ unsigned int dft_brightness;
+ unsigned int pwm_period_ns;
+ int (*init)(struct device *dev);
+ int (*notify)(int brightness);
+ void (*exit)(struct device *dev);
+};
+
+#endif
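
A board file would typically hand this structure to the generic driver through platform data. A hedged sketch; the numeric values and the "pwm-backlight" device name are assumptions tied to drivers/video/backlight/pwm_bl.c:

static struct platform_pwm_backlight_data foo_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 255,
	.dft_brightness	= 100,
	.pwm_period_ns	= 1000000,	/* 1 kHz PWM */
};

static struct platform_device foo_backlight_device = {
	.name	= "pwm-backlight",
	.id	= -1,
	.dev	= {
		.platform_data = &foo_backlight_data,
	},
};
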
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index b3aa05baab8..8c774905dcf 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -151,7 +151,10 @@ extern struct lockdep_map rcu_lock_map;
#define __synchronize_sched() synchronize_rcu()
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
extern void __rcu_init(void);
+#define rcu_init_sched() do { } while (0)
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index bde4586f438..b0f39be08b6 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -1,6 +1,373 @@
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H
+#ifdef __KERNEL__
+
+/*
+ * RCU-protected list version
+ */
#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next)
+{
+ new->next = next;
+ new->prev = prev;
+ rcu_assign_pointer(prev->next, new);
+ next->prev = new;
+}
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+ __list_add_rcu(new, head, head->next);
+}
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+ struct list_head *head)
+{
+ __list_add_rcu(new, head->prev, head);
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU-based
+ * lock-free traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->prev = LIST_POISON2;
+}
+
+/**
+ * list_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ * Note: @old should not be empty.
+ */
+static inline void list_replace_rcu(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->prev = old->prev;
+ rcu_assign_pointer(new->prev->next, new);
+ new->next->prev = new;
+ old->prev = LIST_POISON2;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ *
+ * RCU readers may traverse @head concurrently with this function.
+ *
+ * Note that this function blocks.
+ *
+ * Important note: the caller must take whatever action is necessary to
+ * prevent any other updates to @head. In principle, it is possible
+ * to modify the list as soon as sync() begins execution.
+ * If this sort of thing becomes necessary, an alternative version
+ * based on call_rcu() could be created. But only if -really-
+ * needed -- there is no shortage of RCU API members.
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+ struct list_head *head,
+ void (*sync)(void))
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ if (list_empty(list))
+ return;
+
+ /* "first" and "last" tracking list, so initialize it. */
+
+ INIT_LIST_HEAD(list);
+
+ /*
+ * At this point, the list body still points to the source list.
+ * Wait for any readers to finish using the list before splicing
+ * the list body into the new list. Any new readers will see
+ * an empty list.
+ */
+
+ sync();
+
+ /*
+ * Readers are finished with the source list, so perform splice.
+ * The order is important if the new list is global and accessible
+ * to concurrent RCU readers. Note that RCU readers are not
+ * permitted to traverse the prev pointers without excluding
+ * this function.
+ */
+
+ last->next = at;
+ rcu_assign_pointer(head->next, first);
+ first->prev = head;
+ at->prev = last;
+}
+
+/**
+ * list_for_each_rcu - iterate over an rcu-protected list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ prefetch(pos->next), pos != (head); \
+ pos = rcu_dereference(pos->next))
+
+#define __list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ pos != (head); \
+ pos = rcu_dereference(pos->next))
+
+/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member) \
+ for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+ prefetch(pos->member.next), &pos->member != (head); \
+ pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
+
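
A hedged sketch of the usual pairing: updaters serialize among themselves with a lock and use the _rcu mutators, while readers only need rcu_read_lock() around the traversal. struct foo, foo_list and foo_lock are invented for the example:

struct foo {
	struct list_head list;
	int key;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void foo_add(struct foo *new)		/* updater */
{
	spin_lock(&foo_lock);
	list_add_rcu(&new->list, &foo_list);
	spin_unlock(&foo_lock);
}

static int foo_count(void)			/* reader */
{
	struct foo *f;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, list)
		n++;
	rcu_read_unlock();
	return n;
}
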
+
+/**
+ * list_for_each_continue_rcu
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Iterate over an rcu-protected list, continuing after current point.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_continue_rcu(pos, head) \
+ for ((pos) = rcu_dereference((pos)->next); \
+ prefetch((pos)->next), (pos) != (head); \
+ (pos) = rcu_dereference((pos)->next))
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_unhashed() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU-based
+ * lock-free traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ */
+static inline void hlist_replace_rcu(struct hlist_node *old,
+ struct hlist_node *new)
+{
+ struct hlist_node *next = old->next;
+
+ new->next = next;
+ new->pprev = old->pprev;
+ rcu_assign_pointer(*new->pprev, new);
+ if (next)
+ new->next->pprev = &new->next;
+ old->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ rcu_assign_pointer(h->first, n);
+ if (first)
+ first->pprev = &n->next;
+}
+
+/**
+ * hlist_add_before_rcu
+ * @n: the new element to add to the hash list.
+ * @next: the existing element to add the new element before.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * before the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_before_rcu(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ rcu_assign_pointer(*(n->pprev), n);
+ next->pprev = &n->next;
+}
+
+/**
+ * hlist_add_after_rcu
+ * @prev: the existing element to add the new element after.
+ * @n: the new element to add to the hash list.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * after the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_after_rcu(struct hlist_node *prev,
+ struct hlist_node *n)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ rcu_assign_pointer(prev->next, n);
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference((head)->first); \
+ pos && ({ prefetch(pos->next); 1; }) && \
+ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference(pos->next))
-#endif /* _LINUX_RCULIST_H */
+#endif /* __KERNEL__ */
+#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d42dbec0608..e8b4039cfb2 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -40,6 +40,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
+#include <linux/completion.h>
/**
* struct rcu_head - callback structure for use with RCU
@@ -168,6 +169,27 @@ struct rcu_head {
(p) = (v); \
})
+/* Infrastructure to implement the synchronize_() primitives. */
+
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+
+extern void wakeme_after_rcu(struct rcu_head *head);
+
+#define synchronize_rcu_xxx(name, func) \
+void name(void) \
+{ \
+ struct rcu_synchronize rcu; \
+ \
+ init_completion(&rcu.completion); \
+ /* Will wake me after RCU finished. */ \
+ func(&rcu.head, wakeme_after_rcu); \
+ /* Wait for it. */ \
+ wait_for_completion(&rcu.completion); \
+}
+
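
For orientation, instantiating the macro with a hypothetical flavour, say synchronize_rcu_xxx(synchronize_foo, call_foo), expands to roughly the following; call_foo() stands in for whichever call_rcu-style primitive the flavour provides:

void synchronize_foo(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_foo(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
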
/**
* synchronize_sched - block until all CPUs have exited any non-preemptive
* kernel code sequences.
@@ -224,8 +246,8 @@ extern void call_rcu_bh(struct rcu_head *head,
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
/* Internal to kernel */
extern void rcu_init(void);
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 8a05c7e20bc..f04b64eca63 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,10 +40,39 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
-#define rcu_qsctr_inc(cpu)
+struct rcu_dyntick_sched {
+ int dynticks;
+ int dynticks_snap;
+ int sched_qs;
+ int sched_qs_snap;
+ int sched_dynticks_snap;
+};
+
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
+
+static inline void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+ rdssp->sched_qs++;
+}
#define rcu_bh_qsctr_inc(cpu)
#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+/**
+ * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full
+ * synchronize_sched()-style grace period elapses, in other words after
+ * all currently executing preempt-disabled sections of code (including
+ * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
+ * completed.
+ */
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+
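
A hedged example of the typical pattern: embed the rcu_head in the object and let the callback free it once every preempt-disabled region in flight has completed. struct foo_conn and its users are invented for the example:

struct foo_conn {
	int id;
	struct rcu_head rcu;
};

static void foo_conn_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo_conn, rcu));
}

static void foo_conn_retire(struct foo_conn *conn)
{
	/* the actual kfree() is deferred past all current preempt-disabled readers */
	call_rcu_sched(&conn->rcu, foo_conn_free_rcu);
}
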
extern void __rcu_read_lock(void) __acquires(RCU);
extern void __rcu_read_unlock(void) __releases(RCU);
extern int rcu_pending(int cpu);
@@ -55,6 +84,7 @@ extern int rcu_needs_cpu(int cpu);
extern void __synchronize_sched(void);
extern void __rcu_init(void);
+extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
@@ -81,20 +111,20 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;
#ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(long, dynticks_progress_counter);
+DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
static inline void rcu_enter_nohz(void)
{
smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- __get_cpu_var(dynticks_progress_counter)++;
- WARN_ON(__get_cpu_var(dynticks_progress_counter) & 0x1);
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ WARN_ON(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1);
}
static inline void rcu_exit_nohz(void)
{
- __get_cpu_var(dynticks_progress_counter)++;
smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
- WARN_ON(!(__get_cpu_var(dynticks_progress_counter) & 0x1));
+ __get_cpu_var(rcu_dyntick_sched).dynticks++;
+ WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
}
#else /* CONFIG_NO_HZ */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e3ab21d7fc7..c5f6e54ec6a 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -34,26 +34,37 @@
* RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device.
* RFKILL_TYPE_UWB: switch is on a ultra wideband device.
* RFKILL_TYPE_WIMAX: switch is on a WiMAX device.
+ * RFKILL_TYPE_WWAN: switch is on a wireless WAN device.
*/
enum rfkill_type {
RFKILL_TYPE_WLAN ,
RFKILL_TYPE_BLUETOOTH,
RFKILL_TYPE_UWB,
RFKILL_TYPE_WIMAX,
+ RFKILL_TYPE_WWAN,
RFKILL_TYPE_MAX,
};
enum rfkill_state {
- RFKILL_STATE_OFF = 0,
- RFKILL_STATE_ON = 1,
+ RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */
+ RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */
+ RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */
};
+/*
+ * These are DEPRECATED, drivers using them should be verified to
+ * comply with the rfkill usage guidelines in Documentation/rfkill.txt
+ * and then converted to use the new names for rfkill_state
+ */
+#define RFKILL_STATE_OFF RFKILL_STATE_SOFT_BLOCKED
+#define RFKILL_STATE_ON RFKILL_STATE_UNBLOCKED
+
/**
* struct rfkill - rfkill control structure.
* @name: Name of the switch.
* @type: Radio type which the button controls, the value stored
* here should be a value from enum rfkill_type.
- * @state: State of the switch (on/off).
+ * @state: State of the switch, "UNBLOCKED" means radio can operate.
* @user_claim_unsupported: Whether the hardware supports exclusive
* RF-kill control by userspace. Set this before registering.
* @user_claim: Set when the switch is controlled exclusively by userspace.
@@ -61,6 +72,12 @@ enum rfkill_state {
* @data: Pointer to the RF button drivers private data which will be
* passed along when toggling radio state.
* @toggle_radio(): Mandatory handler to control state of the radio.
+ * Only RFKILL_STATE_SOFT_BLOCKED and RFKILL_STATE_UNBLOCKED are
+ * valid parameters.
+ * @get_state(): handler to read the current radio state from hardware;
+ * may be called from atomic context and should return 0 on success.
+ * Either this handler OR judicious use of rfkill_force_state() is
+ * MANDATORY for any driver capable of RFKILL_STATE_HARD_BLOCKED.
* @led_trigger: A LED trigger for this button's LED.
* @dev: Device structure integrating the switch into device tree.
* @node: Used to place switch into list of all switches known to the
@@ -80,6 +97,7 @@ struct rfkill {
void *data;
int (*toggle_radio)(void *data, enum rfkill_state state);
+ int (*get_state)(void *data, enum rfkill_state *state);
#ifdef CONFIG_RFKILL_LEDS
struct led_trigger led_trigger;
@@ -95,6 +113,21 @@ void rfkill_free(struct rfkill *rfkill);
int rfkill_register(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
+int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
+
+/**
+ * rfkill_state_complement - return the complementary state
+ * @state: state to return the complement of
+ *
+ * Returns RFKILL_STATE_SOFT_BLOCKED if @state is RFKILL_STATE_UNBLOCKED,
+ * returns RFKILL_STATE_UNBLOCKED otherwise.
+ */
+static inline enum rfkill_state rfkill_state_complement(enum rfkill_state state)
+{
+ return (state == RFKILL_STATE_UNBLOCKED) ?
+ RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
+}
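
A rough sketch of how a wireless driver might hook into this; rfkill_allocate() is assumed to be the allocator already declared elsewhere in this header, and the foo_* names are invented:

static int foo_toggle_radio(void *data, enum rfkill_state state)
{
	struct foo_priv *priv = data;

	/* only SOFT_BLOCKED and UNBLOCKED are passed in, per the docs above */
	return foo_radio_set(priv, state == RFKILL_STATE_UNBLOCKED);
}

static int foo_register_rfkill(struct foo_priv *priv, struct device *parent)
{
	priv->rfkill = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
	if (!priv->rfkill)
		return -ENOMEM;

	priv->rfkill->name = "foo-wlan";
	priv->rfkill->data = priv;
	priv->rfkill->state = RFKILL_STATE_UNBLOCKED;
	priv->rfkill->toggle_radio = foo_toggle_radio;

	return rfkill_register(priv->rfkill);
}
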
+
/**
* rfkill_get_led_name - Get the LED trigger name for the button's LED.
* This function might return a NULL pointer if registering of the
@@ -110,4 +143,11 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill)
#endif
}
+/* rfkill notification chain */
+#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill
+ switch has changed */
+
+int register_rfkill_notifier(struct notifier_block *nb);
+int unregister_rfkill_notifier(struct notifier_block *nb);
+
#endif /* RFKILL_H */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index b358c704d10..f4d386c191f 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -482,6 +482,7 @@ enum
TCA_RATE,
TCA_FCNT,
TCA_STATS2,
+ TCA_STAB,
__TCA_MAX
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c5d3f847ca8..1941d8b5cf1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -134,7 +134,6 @@ extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
struct seq_file;
struct cfs_rq;
@@ -246,6 +245,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
+extern int runqueue_is_locked(void);
+
extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
@@ -784,6 +785,8 @@ struct sched_domain {
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
+ u64 last_update;
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -823,23 +826,6 @@ extern int arch_reinit_sched_domains(void);
#endif /* CONFIG_SMP */
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
- unsigned long cpu;
-
- for_each_online_cpu(cpu) {
- if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
- return 1;
- }
- return 0;
-}
-
struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
@@ -921,8 +907,8 @@ struct sched_class {
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);
- void (*join_domain)(struct rq *rq);
- void (*leave_domain)(struct rq *rq);
+ void (*rq_online)(struct rq *rq);
+ void (*rq_offline)(struct rq *rq);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1039,6 +1025,7 @@ struct task_struct {
#endif
int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
@@ -1075,12 +1062,6 @@ struct task_struct {
#endif
struct list_head tasks;
- /*
- * ptrace_list/ptrace_children forms the list of my children
- * that were stolen by a ptracer.
- */
- struct list_head ptrace_children;
- struct list_head ptrace_list;
struct mm_struct *mm, *active_mm;
@@ -1102,18 +1083,25 @@ struct task_struct {
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
- * p->parent->pid)
+ * p->real_parent->pid)
*/
- struct task_struct *real_parent; /* real parent process (when being debugged) */
- struct task_struct *parent; /* parent process */
+ struct task_struct *real_parent; /* real parent process */
+ struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
/*
- * children/sibling forms the list of my children plus the
- * tasks I'm ptracing.
+ * children/sibling forms the list of my natural children
*/
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
+ /*
+ * ptraced is the list of tasks this task is using ptrace on.
+ * This includes both natural children and PTRACE_ATTACH targets.
+ * p->ptrace_entry is p's link on the p->parent->ptraced list.
+ */
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1122,7 +1110,6 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned int rt_priority;
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
cputime_t prev_utime, prev_stime;
@@ -1141,12 +1128,12 @@ struct task_struct {
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- unsigned securebits;
struct user_struct *user;
+ unsigned securebits;
#ifdef CONFIG_KEYS
+ unsigned char jit_keyring; /* default keyring to attach requested keys to */
struct key *request_key_auth; /* assumed request_key authority */
struct key *thread_keyring; /* keyring private to this thread */
- unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1233,8 +1220,8 @@ struct task_struct {
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
/* journalling filesystem info */
@@ -1262,10 +1249,6 @@ struct task_struct {
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_stimexpd;/* stime since last update */
#endif
-#ifdef CONFIG_NUMA
- struct mempolicy *mempolicy;
- short il_next;
-#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed;
int cpuset_mems_generation;
@@ -1285,6 +1268,10 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy;
+ short il_next;
+#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
@@ -1504,9 +1491,11 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1573,13 +1562,28 @@ static inline void sched_clock_idle_sleep_event(void)
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
-#else
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
@@ -1622,6 +1626,7 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
@@ -1655,6 +1660,8 @@ extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+ struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
@@ -1870,9 +1877,6 @@ extern void wait_task_inactive(struct task_struct * p);
#define wait_task_inactive(p) do { } while (0)
#endif
-#define remove_parent(p) list_del_init(&(p)->sibling)
-#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
-
#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
#define for_each_process(p) \
@@ -2131,6 +2135,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
}
#endif
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
+#endif
+
extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
@@ -2225,6 +2241,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 50737c70e78..31c8851ec5d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -46,7 +46,8 @@ struct audit_krule;
*/
extern int cap_capable(struct task_struct *tsk, int cap);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace(struct task_struct *parent, struct task_struct *child);
+extern int cap_ptrace(struct task_struct *parent, struct task_struct *child,
+ unsigned int mode);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -79,6 +80,7 @@ struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
+struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
@@ -289,10 +291,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Update module state after a successful pivot.
* @old_path contains the path for the old root.
* @new_path contains the path for the new root.
- * @sb_get_mnt_opts:
- * Get the security relevant mount options used for a superblock
- * @sb the superblock to get security mount options from
- * @opts binary data structure containing all lsm mount data
* @sb_set_mnt_opts:
* Set the security relevant mount options used for a superblock
* @sb the superblock to set security mount options for
@@ -1170,6 +1168,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* attributes would be changed by the execve.
* @parent contains the task_struct structure for parent process.
* @child contains the task_struct structure for child process.
+ * @mode contains the PTRACE_MODE flags indicating the form of access.
* Return 0 if permission is granted.
* @capget:
* Get the @effective, @inheritable, and @permitted capability sets for
@@ -1240,11 +1239,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @pages contains the number of pages.
* Return 0 if permission is granted.
*
- * @register_security:
- * allow module stacking.
- * @name contains the name of the security module being stacked.
- * @ops contains a pointer to the struct security_operations of the module to stack.
- *
* @secid_to_secctx:
* Convert secid to security context.
* @secid contains the security ID.
@@ -1295,7 +1289,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
- int (*ptrace) (struct task_struct *parent, struct task_struct *child);
+ int (*ptrace) (struct task_struct *parent, struct task_struct *child,
+ unsigned int mode);
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted);
@@ -1328,6 +1323,7 @@ struct security_operations {
void (*sb_free_security) (struct super_block *sb);
int (*sb_copy_data) (char *orig, char *copy);
int (*sb_kern_mount) (struct super_block *sb, void *data);
+ int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct path *path,
char *type, unsigned long flags, void *data);
@@ -1343,8 +1339,6 @@ struct security_operations {
struct path *new_path);
void (*sb_post_pivotroot) (struct path *old_path,
struct path *new_path);
- int (*sb_get_mnt_opts) (const struct super_block *sb,
- struct security_mnt_opts *opts);
int (*sb_set_mnt_opts) (struct super_block *sb,
struct security_mnt_opts *opts);
void (*sb_clone_mnt_opts) (const struct super_block *oldsb,
@@ -1472,10 +1466,6 @@ struct security_operations {
int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
int (*netlink_recv) (struct sk_buff *skb, int cap);
- /* allow module stacking */
- int (*register_security) (const char *name,
- struct security_operations *ops);
-
void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
int (*getprocattr) (struct task_struct *p, char *name, char **value);
@@ -1565,7 +1555,6 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
-extern int mod_reg_security(const char *name, struct security_operations *ops);
extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -1573,7 +1562,8 @@ extern struct dentry *securityfs_create_dir(const char *name, struct dentry *par
extern void securityfs_remove(struct dentry *dentry);
/* Security operations */
-int security_ptrace(struct task_struct *parent, struct task_struct *child);
+int security_ptrace(struct task_struct *parent, struct task_struct *child,
+ unsigned int mode);
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
@@ -1606,6 +1596,7 @@ int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
int security_sb_copy_data(char *orig, char *copy);
int security_sb_kern_mount(struct super_block *sb, void *data);
+int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags, void *data);
@@ -1617,8 +1608,6 @@ void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *d
void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint);
int security_sb_pivotroot(struct path *old_path, struct path *new_path);
void security_sb_post_pivotroot(struct path *old_path, struct path *new_path);
-int security_sb_get_mnt_opts(const struct super_block *sb,
- struct security_mnt_opts *opts);
int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
void security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb);
@@ -1755,9 +1744,11 @@ static inline int security_init(void)
return 0;
}
-static inline int security_ptrace(struct task_struct *parent, struct task_struct *child)
+static inline int security_ptrace(struct task_struct *parent,
+ struct task_struct *child,
+ unsigned int mode)
{
- return cap_ptrace(parent, child);
+ return cap_ptrace(parent, child, mode);
}
static inline int security_capget(struct task_struct *target,
@@ -1881,6 +1872,12 @@ static inline int security_sb_kern_mount(struct super_block *sb, void *data)
return 0;
}
+static inline int security_sb_show_options(struct seq_file *m,
+ struct super_block *sb)
+{
+ return 0;
+}
+
static inline int security_sb_statfs(struct dentry *dentry)
{
return 0;
@@ -1927,12 +1924,6 @@ static inline int security_sb_pivotroot(struct path *old_path,
static inline void security_sb_post_pivotroot(struct path *old_path,
struct path *new_path)
{ }
-static inline int security_sb_get_mnt_opts(const struct super_block *sb,
- struct security_mnt_opts *opts)
-{
- security_init_mnt_opts(opts);
- return 0;
-}
static inline int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 4ac52542a56..32c89bbe24a 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -14,7 +14,10 @@ struct seq_net_private {
int seq_open_net(struct inode *, struct file *,
const struct seq_operations *, int);
+int single_open_net(struct inode *, struct file *file,
+ int (*show)(struct seq_file *, void *));
int seq_release_net(struct inode *, struct file *);
+int single_release_net(struct inode *, struct file *);
static inline struct net *seq_file_net(struct seq_file *seq)
{
#ifdef CONFIG_NET_NS
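
The new single_open_net()/single_release_net() pair is for one-record /proc files that are per network namespace: the open helper resolves the struct net behind the inode and hands it to the show callback as the seq_file private data. A minimal sketch, assuming a hypothetical per-netns proc entry (all foo_* names are illustrative):

static int foo_show(struct seq_file *seq, void *v)
{
        struct net *net = seq->private;  /* filled in by single_open_net() */

        seq_printf(seq, "entries in %p: %d\n", net, 0);
        return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
        return single_open_net(inode, file, foo_show);
}

static const struct file_operations foo_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = foo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release_net,
};
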
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d8f31de632c..f3a1c0e4502 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -190,6 +190,7 @@ struct uart_ops {
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
+ void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
struct ktermios *old);
void (*set_ldisc)(struct uart_port *);
@@ -343,13 +344,15 @@ typedef unsigned int __bitwise__ uif_t;
* stuff here.
*/
struct uart_info {
- struct tty_struct *tty;
+ struct tty_port port;
struct circ_buf xmit;
uif_t flags;
/*
* Definitions for info->flags. These are _private_ to serial_core, and
* are specific to this structure. They may be queried by low level drivers.
+ *
+ * FIXME: use the ASY_ definitions
*/
#define UIF_CHECK_CD ((__force uif_t) (1 << 25))
#define UIF_CTS_FLOW ((__force uif_t) (1 << 26))
@@ -357,11 +360,7 @@ struct uart_info {
#define UIF_INITIALIZED ((__force uif_t) (1 << 31))
#define UIF_SUSPENDED ((__force uif_t) (1 << 30))
- int blocked_open;
-
struct tasklet_struct tlet;
-
- wait_queue_head_t open_wait;
wait_queue_head_t delta_msr_wait;
};
@@ -438,8 +437,8 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
#define uart_circ_chars_free(circ) \
(CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-#define uart_tx_stopped(port) \
- ((port)->info->tty->stopped || (port)->info->tty->hw_stopped)
+#define uart_tx_stopped(portp) \
+ ((portp)->info->port.tty->stopped || (portp)->info->port.tty->hw_stopped)
/*
* The following are helper functions for the low level drivers.
@@ -450,7 +449,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
#ifdef SUPPORT_SYSRQ
if (port->sysrq) {
if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch, port->info ? port->info->tty : NULL);
+ handle_sysrq(ch, port->info ? port->info->port.tty : NULL);
port->sysrq = 0;
return 1;
}
@@ -479,7 +478,7 @@ static inline int uart_handle_break(struct uart_port *port)
}
#endif
if (port->flags & UPF_SAK)
- do_SAK(info->tty);
+ do_SAK(info->port.tty);
return 0;
}
@@ -502,9 +501,9 @@ uart_handle_dcd_change(struct uart_port *port, unsigned int status)
if (info->flags & UIF_CHECK_CD) {
if (status)
- wake_up_interruptible(&info->open_wait);
- else if (info->tty)
- tty_hangup(info->tty);
+ wake_up_interruptible(&info->port.open_wait);
+ else if (info->port.tty)
+ tty_hangup(info->port.tty);
}
}
@@ -517,7 +516,7 @@ static inline void
uart_handle_cts_change(struct uart_port *port, unsigned int status)
{
struct uart_info *info = port->info;
- struct tty_struct *tty = info->tty;
+ struct tty_struct *tty = info->port.tty;
port->icount.cts++;
@@ -543,7 +542,7 @@ static inline void
uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag)
{
- struct tty_struct *tty = port->info->tty;
+ struct tty_struct *tty = port->info->port.tty;
if ((status & port->ignore_status_mask & ~overrun) == 0)
tty_insert_flip_char(tty, ch, flag);
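
The new flush_buffer() hook gives a UART driver a point to discard transmit state it keeps outside the generic circular buffer (for example data already handed to a DMA engine) when the core flushes the write buffer. A hedged sketch, using a hypothetical driver structure:

struct foo_uart_port {
        struct uart_port port;
        unsigned int     tx_dma_len;   /* driver-private DMA state */
};

static void foo_uart_flush_buffer(struct uart_port *port)
{
        struct foo_uart_port *fp = container_of(port, struct foo_uart_port, port);

        /* forget about TX data already queued to the DMA engine */
        fp->tx_dma_len = 0;
}

static struct uart_ops foo_uart_ops = {
        /* .tx_empty, .start_tx, .startup, .shutdown, ... omitted */
        .flush_buffer = foo_uart_flush_buffer,
};
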
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 299ec4b3141..7ea44f6621f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -246,6 +246,7 @@ typedef unsigned char *sk_buff_data_t;
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
+ * @vlan_tci: vlan tag control information
*/
struct sk_buff {
@@ -305,9 +306,7 @@ struct sk_buff {
#endif
int iif;
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
__u16 queue_mapping;
-#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
@@ -328,6 +327,8 @@ struct sk_buff {
__u32 mark;
+ __u16 vlan_tci;
+
sk_buff_data_t transport_header;
sk_buff_data_t network_header;
sk_buff_data_t mac_header;
@@ -1671,25 +1672,17 @@ static inline void skb_init_secmark(struct sk_buff *skb)
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
skb->queue_mapping = queue_mapping;
-#endif
}
static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
return skb->queue_mapping;
-#else
- return 0;
-#endif
}
static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
to->queue_mapping = from->queue_mapping;
-#endif
}
static inline int skb_is_gso(const struct sk_buff *skb)
@@ -1702,6 +1695,20 @@ static inline int skb_is_gso_v6(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+ /* LRO sets gso_size but not gso_type, whereas if GSO is really
+ * wanted then gso_type will be set. */
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+ __skb_warn_lro_forwarding(skb);
+ return true;
+ }
+ return false;
+}
+
static inline void skb_forward_csum(struct sk_buff *skb)
{
/* Unfortunately we don't support this one. Any brave souls? */
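
skb_warn_if_lro() is intended for forwarding paths: an skb that was merged by LRO may be larger than the output device's MTU and cannot be re-segmented, so it has to be dropped rather than forwarded. A minimal caller sketch (function and device names are illustrative):

static void foo_forward(struct sk_buff *skb, struct net_device *out)
{
        if (skb_warn_if_lro(skb)) {
                /* LRO-aggregated skb: the helper has warned, just drop it */
                kfree_skb(skb);
                return;
        }
        skb->dev = out;
        dev_queue_xmit(skb);
}
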
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
new file mode 100644
index 00000000000..b58f54c2418
--- /dev/null
+++ b/include/linux/smc911x.h
@@ -0,0 +1,12 @@
+#ifndef __SMC911X_H__
+#define __SMC911X_H__
+
+#define SMC911X_USE_16BIT (1 << 0)
+#define SMC911X_USE_32BIT (1 << 1)
+
+struct smc911x_platdata {
+ unsigned long flags;
+ unsigned long irq_flags; /* IRQF_... */
+};
+
+#endif /* __SMC911X_H__ */
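
Board support code passes struct smc911x_platdata to the "smc911x" platform driver through platform_data. A minimal sketch for a board with the chip on a 32-bit bus and an active-low interrupt (resource setup omitted; the IRQ flag is illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/smc911x.h>

static struct smc911x_platdata board_smc911x_pdata = {
        .flags     = SMC911X_USE_32BIT,
        .irq_flags = IRQF_TRIGGER_LOW,
};

static struct platform_device board_smc911x_device = {
        .name = "smc911x",
        .id   = -1,
        .dev  = {
                .platform_data = &board_smc911x_pdata,
        },
        /* .resource/.num_resources for the MMIO window and IRQ omitted */
};
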
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232ccf9cf..48262f86c96 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,18 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int retry, int wait);
+ int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#else
+static inline void init_call_single_data(void)
+{
+}
+#endif
/*
* Call a function on all processors
*/
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
{
return 0;
}
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
({ \
local_irq_disable(); \
func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
({ \
WARN_ON(cpuid != 0); \
local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
+static inline void init_call_single_data(void)
+{
+}
#endif /* !SMP */
/*
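
The retry argument is gone from the cross-CPU call API: callers now pass only the function, its argument, and whether to wait for completion. A minimal calling sketch (the callback is hypothetical; these must not be called with interrupts disabled when wait is set):

static void foo_flush_local(void *info)
{
        /* runs on each CPU from IPI/local context */
}

static void foo_flush_all(void)
{
        /* run on every online CPU and wait for completion */
        on_each_cpu(foo_flush_local, NULL, 1);

        /* or run only on the other CPUs */
        smp_call_function(foo_flush_local, NULL, 1);
}
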
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index aab3a4cff4e..813be59bf34 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -27,11 +27,24 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
+/*
+ * Various legacy drivers don't really need the BKL in a specific
+ * function, but they *do* need to know that the BKL became available.
+ * This function just avoids wrapping a bunch of lock/unlock pairs
+ * around code which doesn't really need it.
+ */
+static inline void cycle_kernel_lock(void)
+{
+ lock_kernel();
+ unlock_kernel();
+}
+
#else
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task) do { } while(0)
+#define cycle_kernel_lock() do { } while(0)
#define reacquire_kernel_lock(task) 0
#define kernel_locked() 1
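
cycle_kernel_lock() is meant for legacy open/init paths that only need to serialize once against code still running under the BKL, without holding it across the rest of the function. A hedged sketch of a legacy char-device open (foo_lookup is a hypothetical helper):

static int foo_open(struct inode *inode, struct file *file)
{
        /* wait until any BKL-holding initialization has finished */
        cycle_kernel_lock();

        file->private_data = foo_lookup(iminor(inode));
        return file->private_data ? 0 : -ENODEV;
}
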
diff --git a/include/linux/socket.h b/include/linux/socket.h
index bd2b30a74e7..950af631e7f 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -306,10 +306,10 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
int offset,
unsigned int len, __wsum *csump);
-extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
-extern int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ulen);
-extern int move_addr_to_kernel(void __user *uaddr, int ulen, void *kaddr);
+extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen);
+extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
#endif
diff --git a/include/linux/sonet.h b/include/linux/sonet.h
index 753680296e1..67ad11fcf88 100644
--- a/include/linux/sonet.h
+++ b/include/linux/sonet.h
@@ -34,7 +34,7 @@ struct sonet_stats {
/* clear error insertion */
#define SONET_GETDIAG _IOR('a',ATMIOC_PHYTYP+4,int)
/* query error insertion */
-#define SONET_SETFRAMING _IO('a',ATMIOC_PHYTYP+5)
+#define SONET_SETFRAMING _IOW('a',ATMIOC_PHYTYP+5,int)
/* set framing mode (SONET/SDH) */
#define SONET_GETFRAMING _IOR('a',ATMIOC_PHYTYP+6,int)
/* get framing mode */
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index d5ca78b93a3..a3626aedaec 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -23,6 +23,15 @@ struct mmc_spi_platform_data {
/* sense switch on sd cards */
int (*get_ro)(struct device *);
+ /*
+ * If the board does not use CD interrupts, the driver can optimize polling
+ * using this function.
+ */
+ int (*get_cd)(struct device *);
+
+ /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
+ unsigned long caps;
+
/* how long to debounce card detect, in msecs */
u16 detect_delay;
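
With the new get_cd() hook and caps field, board code can report card presence from a GPIO and ask the MMC core to poll when no card-detect interrupt is wired up, as the MMC_CAP_NEEDS_POLL example in the comment suggests. A minimal sketch (GPIO numbers and polarity are illustrative):

#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/spi/mmc_spi.h>

#define BOARD_GPIO_SD_CD 17     /* illustrative */
#define BOARD_GPIO_SD_WP 18     /* illustrative */

static int board_mmc_spi_get_cd(struct device *dev)
{
        return !gpio_get_value(BOARD_GPIO_SD_CD);  /* active-low detect */
}

static int board_mmc_spi_get_ro(struct device *dev)
{
        return gpio_get_value(BOARD_GPIO_SD_WP);
}

static struct mmc_spi_platform_data board_mmc_spi_pdata = {
        .get_cd       = board_mmc_spi_get_cd,
        .get_ro       = board_mmc_spi_get_ro,
        .caps         = MMC_CAP_NEEDS_POLL,
        .detect_delay = 100,    /* msecs */
};
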
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 50dfd0dc409..4bf8cade9db 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
#include <linux/ssb/ssb_regs.h>
@@ -137,9 +138,6 @@ struct ssb_device {
const struct ssb_bus_ops *ops;
struct device *dev;
- /* Pointer to the device that has to be used for
- * any DMA related operation. */
- struct device *dma_dev;
struct ssb_bus *bus;
struct ssb_device_id id;
@@ -399,13 +397,151 @@ static inline void ssb_block_write(struct ssb_device *dev, const void *buffer,
#endif /* CONFIG_SSB_BLOCKIO */
+/* The SSB DMA API. Use this API for any DMA operation on the device.
+ * This API is basically a wrapper that calls the correct DMA API for
+ * the host device type the SSB device is attached to. */
+
/* Translation (routing) bits that need to be ORed to DMA
* addresses before they are given to a device. */
extern u32 ssb_dma_translation(struct ssb_device *dev);
#define SSB_DMA_TRANSLATION_MASK 0xC0000000
#define SSB_DMA_TRANSLATION_SHIFT 30
-extern int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask);
+extern int ssb_dma_set_mask(struct ssb_device *dev, u64 mask);
+
+extern void * ssb_dma_alloc_consistent(struct ssb_device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp_flags);
+extern void ssb_dma_free_consistent(struct ssb_device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ gfp_t gfp_flags);
+
+static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
+{
+#ifdef CONFIG_SSB_DEBUG
+ printk(KERN_ERR "SSB: BUG! Calling DMA API for "
+ "unsupported bustype %d\n", dev->bus->bustype);
+#endif /* DEBUG */
+}
+
+static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ return pci_dma_mapping_error(addr);
+ case SSB_BUSTYPE_SSB:
+ return dma_mapping_error(addr);
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+ return -ENOSYS;
+}
+
+static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
+ size_t size, enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ return pci_map_single(dev->bus->host_pci, p, size, dir);
+ case SSB_BUSTYPE_SSB:
+ return dma_map_single(dev->dev, p, size, dir);
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+ return 0;
+}
+
+static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
+ return;
+ case SSB_BUSTYPE_SSB:
+ dma_unmap_single(dev->dev, dma_addr, size, dir);
+ return;
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+}
+
+static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
+ dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+ size, dir);
+ return;
+ case SSB_BUSTYPE_SSB:
+ dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
+ return;
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+}
+
+static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
+ dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+ size, dir);
+ return;
+ case SSB_BUSTYPE_SSB:
+ dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
+ return;
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+}
+
+static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
+ dma_addr_t dma_addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ /* Just sync everything. That's all the PCI API can do. */
+ pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
+ offset + size, dir);
+ return;
+ case SSB_BUSTYPE_SSB:
+ dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
+ size, dir);
+ return;
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+}
+
+static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
+ dma_addr_t dma_addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dev->bus->bustype) {
+ case SSB_BUSTYPE_PCI:
+ /* Just sync everything. That's all the PCI API can do. */
+ pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
+ offset + size, dir);
+ return;
+ case SSB_BUSTYPE_SSB:
+ dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
+ size, dir);
+ return;
+ default:
+ __ssb_dma_not_implemented(dev);
+ }
+}
#ifdef CONFIG_SSB_PCIHOST
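
Drivers are expected to go through these wrappers rather than calling the PCI or generic DMA API directly, so the same code works whether the core sits behind a PCI host or on a native SSB bus. A hedged map/unmap sketch (buffer handling is illustrative):

static int foo_map_rx_buffer(struct ssb_device *sdev, void *buf, size_t len,
                             dma_addr_t *dmaaddr)
{
        *dmaaddr = ssb_dma_map_single(sdev, buf, len, DMA_FROM_DEVICE);
        if (ssb_dma_mapping_error(sdev, *dmaaddr))
                return -EIO;

        /* the address given to the hardware additionally needs the
         * ssb_dma_translation(sdev) routing bits ORed in */
        return 0;
}

static void foo_unmap_rx_buffer(struct ssb_device *sdev, dma_addr_t dmaaddr,
                                size_t len)
{
        ssb_dma_unmap_single(sdev, dmaaddr, len, DMA_FROM_DEVICE);
}
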
diff --git a/include/linux/stallion.h b/include/linux/stallion.h
index 0424d75a5aa..336af33c6ea 100644
--- a/include/linux/stallion.h
+++ b/include/linux/stallion.h
@@ -69,6 +69,7 @@ struct stlrq {
*/
struct stlport {
unsigned long magic;
+ struct tty_port port;
unsigned int portnr;
unsigned int panelnr;
unsigned int brdnr;
@@ -76,12 +77,10 @@ struct stlport {
int uartaddr;
unsigned int pagenr;
unsigned long istate;
- int flags;
int baud_base;
int custom_divisor;
int close_delay;
int closing_wait;
- int refcount;
int openwaitcnt;
int brklen;
unsigned int sigs;
@@ -92,9 +91,6 @@ struct stlport {
unsigned long clk;
unsigned long hwid;
void *uartp;
- struct tty_struct *tty;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
comstats_t stats;
struct stlrq tx;
};
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index fec6899bf35..d48d4e605f7 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -7,8 +7,6 @@
* Andy Adamson <andros@umich.edu>
* Bruce Fields <bfields@umich.edu>
* Copyright (c) 2000 The Regents of the University of Michigan
- *
- * $Id$
*/
#ifndef _LINUX_SUNRPC_AUTH_GSS_H
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6fff7f82ef1..e5bfe01ee30 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -42,7 +42,8 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_discrtry : 1,/* disconnect before retry */
- cl_autobind : 1;/* use getport() */
+ cl_autobind : 1,/* use getport() */
+ cl_chatty : 1;/* be verbose */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -114,6 +115,7 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3)
#define RPC_CLNT_CREATE_NOPING (1UL << 4)
#define RPC_CLNT_CREATE_DISCRTRY (1UL << 5)
+#define RPC_CLNT_CREATE_QUIET (1UL << 6)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -123,6 +125,9 @@ void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
int rpcb_register(u32, u32, int, unsigned short, int *);
+int rpcb_v4_register(const u32 program, const u32 version,
+ const struct sockaddr *address,
+ const char *netid, int *result);
int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void rpcb_getport_async(struct rpc_task *);
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 459c5fc11d5..03f33330ece 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -7,8 +7,6 @@
* Andy Adamson <andros@umich.edu>
* Bruce Fields <bfields@umich.edu>
* Copyright (c) 2000 The Regents of the University of Michigan
- *
- * $Id$
*/
#ifndef _LINUX_SUNRPC_GSS_API_H
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index a10f1fb0bf7..e7bbdba474d 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -51,6 +51,9 @@ struct krb5_ctx {
extern spinlock_t krb5_seq_lock;
+/* The length of the Kerberos GSS token header */
+#define GSS_KRB5_TOK_HDR_LEN (16)
+
#define KG_TOK_MIC_MSG 0x0101
#define KG_TOK_WRAP_MSG 0x0201
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d1a5c8c1a0f..64981a2f1ca 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -135,7 +135,6 @@ struct rpc_task_setup {
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
-#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_RUNNING 0
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 4b54c5fdcfd..dc69068d94c 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -22,7 +22,7 @@
/*
* This is the RPC server thread function prototype
*/
-typedef void (*svc_thread_fn)(struct svc_rqst *);
+typedef int (*svc_thread_fn)(void *);
/*
*
@@ -80,7 +80,6 @@ struct svc_serv {
struct module * sv_module; /* optional module to count when
* adding threads */
svc_thread_fn sv_function; /* main function for threads */
- int sv_kill_signal; /* signal to kill threads */
};
/*
@@ -388,8 +387,8 @@ struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- void (*shutdown)(struct svc_serv*),
- svc_thread_fn, int sig, struct module *);
+ void (*shutdown)(struct svc_serv*), svc_thread_fn,
+ struct module *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
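
svc_thread_fn now has the kthread-style prototype int (*)(void *), with the struct svc_rqst passed as the opaque argument, and svc_create_pooled() no longer takes a kill signal. A rough sketch of a service thread body under the new prototype (the loop details are illustrative, not the exact nfsd/lockd logic):

static int foo_svc_thread(void *vrqstp)
{
        struct svc_rqst *rqstp = vrqstp;
        int err;

        while (!kthread_should_stop()) {
                err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
                if (err == -EAGAIN || err == -EINTR)
                        continue;
                if (err < 0)
                        break;
                svc_process(rqstp);
        }
        svc_exit_thread(rqstp);
        return 0;
}
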
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 05eb4664d0d..ef2e3a20bf3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -72,7 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
*/
struct svc_rdma_op_ctxt {
struct svc_rdma_op_ctxt *read_hdr;
- struct list_head free_list;
+ int hdr_count;
struct xdr_buf arg;
struct list_head dto_q;
enum ib_wr_opcode wr_op;
@@ -86,6 +86,31 @@ struct svc_rdma_op_ctxt {
struct page *pages[RPCSVC_MAXPAGES];
};
+/*
+ * NFS requests are mapped on the client side by the chunk lists in
+ * the RPCRDMA header. During the fetching of the RPC from the client
+ * and the writing of the reply to the client, the memory in the
+ * client and the memory in the server must be mapped as contiguous
+ * vaddr/len for access by the hardware. These data structures keep
+ * these mappings.
+ *
+ * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
+ * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
+ * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
+ * mapping of the reply.
+ */
+struct svc_rdma_chunk_sge {
+ int start; /* sge no for this chunk */
+ int count; /* sge count for this chunk */
+};
+struct svc_rdma_req_map {
+ unsigned long count;
+ union {
+ struct kvec sge[RPCSVC_MAXPAGES];
+ struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
+ };
+};
+
#define RDMACTXT_F_LAST_CTXT 2
struct svcxprt_rdma {
@@ -93,7 +118,6 @@ struct svcxprt_rdma {
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
int sc_ord; /* RDMA read limit */
- wait_queue_head_t sc_read_wait;
int sc_max_sge;
int sc_sq_depth; /* Depth of SQ */
@@ -104,12 +128,8 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
+ atomic_t sc_dma_used;
atomic_t sc_ctxt_used;
- struct list_head sc_ctxt_free;
- int sc_ctxt_cnt;
- int sc_ctxt_bump;
- int sc_ctxt_max;
- spinlock_t sc_ctxt_lock;
struct list_head sc_rq_dto_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
@@ -173,6 +193,8 @@ extern int svc_rdma_post_recv(struct svcxprt_rdma *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
+extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
+extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern struct svc_xprt_class svc_rdma_class;
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 417a1def56d..c9165d9771a 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -3,9 +3,6 @@
*
* Bruce Fields <bfields@umich.edu>
* Copyright (c) 2002 The Regents of the Unviersity of Michigan
- *
- * $Id$
- *
*/
#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a6977423baf..e8e69159af7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
* that implement @begin(), but platforms implementing @begin() should
* also provide a @end() which cleans up transitions aborted before
* @enter().
+ *
+ * @recover: Recover the platform from a suspend failure.
+ * Called by the PM core if the suspending of devices fails.
+ * This callback is optional and should only be implemented by platforms
+ * which require special recovery actions in that situation.
*/
struct platform_suspend_ops {
int (*valid)(suspend_state_t state);
@@ -94,6 +99,7 @@ struct platform_suspend_ops {
int (*enter)(suspend_state_t state);
void (*finish)(void);
void (*end)(void);
+ void (*recover)(void);
};
#ifdef CONFIG_SUSPEND
@@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
* The methods in this structure allow a platform to carry out special
* operations required by it during a hibernation transition.
*
- * All the methods below must be implemented.
+ * All the methods below, except for @recover(), must be implemented.
*
* @begin: Tell the platform driver that we're starting hibernation.
* Called right after shrinking memory and before freezing devices.
@@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
* @restore_cleanup: Clean up after a failing image restoration.
* Called right after the nonboot CPUs have been enabled and before
* thawing devices (runs with IRQs on).
+ *
+ * @recover: Recover the platform from a failure to suspend devices.
+ * Called by the PM core if the suspending of devices during hibernation
+ * fails. This callback is optional and should only be implemented by
+ * platforms which require special recovery actions in that situation.
*/
struct platform_hibernation_ops {
int (*begin)(void);
@@ -200,6 +211,7 @@ struct platform_hibernation_ops {
void (*leave)(void);
int (*pre_restore)(void);
void (*restore_cleanup)(void);
+ void (*recover)(void);
};
#ifdef CONFIG_HIBERNATION
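
@recover is only called when suspending devices fails, so most platforms can leave it NULL; a platform that powers things down in its earlier callbacks can use it to undo that work. A minimal sketch (the platform callbacks are hypothetical):

static int foo_pm_enter(suspend_state_t state)
{
        /* enter the low-power state; illustrative no-op */
        return 0;
}

static void foo_pm_recover(void)
{
        /* undo whatever the earlier suspend preparation set up */
}

static struct platform_suspend_ops foo_pm_ops = {
        .valid   = suspend_valid_only_mem,
        .enter   = foo_pm_enter,
        .recover = foo_pm_recover,
};
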
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b31b6b74aa2..2e2557388e3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -224,6 +224,12 @@ struct tcp_options_received {
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
+/* This is the max number of SACKS that we'll generate and process. It's safe
+ * to increase this, although since:
+ * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
+ * only four options will fit in a standard TCP header */
+#define TCP_NUM_SACKS 4
+
struct tcp_request_sock {
struct inet_request_sock req;
#ifdef CONFIG_TCP_MD5SIG
@@ -291,10 +297,9 @@ struct tcp_sock {
u32 rcv_ssthresh; /* Current window clamp */
u32 frto_highmark; /* snd_nxt when RTO occurred */
- u8 reordering; /* Packet reordering metric. */
+ u16 advmss; /* Advertised MSS */
u8 frto_counter; /* Number of new acks after RTO */
u8 nonagle; /* Disable Nagle algorithm? */
- u8 keepalive_probes; /* num of allowed keep alive probes */
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */
@@ -305,6 +310,10 @@ struct tcp_sock {
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
+
+ u16 urg_data; /* Saved octet of OOB data and control flags */
+ u8 urg_mode; /* In urgent mode */
+ u8 ecn_flags; /* ECN status bits. */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
@@ -320,13 +329,24 @@ struct tcp_sock {
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
- struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
-
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
+ u32 lost_out; /* Lost packets */
+ u32 sacked_out; /* SACK'd packets */
+ u32 fackets_out; /* FACK'd packets */
+ u32 tso_deferred;
+ u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
-/* SACKs data */
+ /* from STCP, retrans queue hinting */
+ struct sk_buff* lost_skb_hint;
+ struct sk_buff *scoreboard_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ struct sk_buff *forward_skb_hint;
+
+ struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
+
+ /* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
@@ -337,23 +357,14 @@ struct tcp_sock {
* sacked_out > 0)
*/
- /* from STCP, retrans queue hinting */
- struct sk_buff* lost_skb_hint;
-
- struct sk_buff *scoreboard_skb_hint;
- struct sk_buff *retransmit_skb_hint;
- struct sk_buff *forward_skb_hint;
-
int lost_cnt_hint;
int retransmit_cnt_hint;
u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
- u16 advmss; /* Advertised MSS */
+ u8 reordering; /* Packet reordering metric. */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
u32 prior_ssthresh; /* ssthresh saved at recovery start */
- u32 lost_out; /* Lost packets */
- u32 sacked_out; /* SACK'd packets */
- u32 fackets_out; /* FACK'd packets */
u32 high_seq; /* snd_nxt at onset of congestion */
u32 retrans_stamp; /* Timestamp of the last retransmit,
@@ -361,23 +372,16 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* tracking retrans started here. */
int undo_retrans; /* number of undoable retransmissions. */
+ u32 total_retrans; /* Total retransmits for entire connection */
+
u32 urg_seq; /* Seq of received urgent pointer */
- u16 urg_data; /* Saved octet of OOB data and control flags */
- u8 urg_mode; /* In urgent mode */
- u8 ecn_flags; /* ECN status bits. */
u32 snd_up; /* Urgent pointer */
- u32 total_retrans; /* Total retransmits for entire connection */
- u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
-
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
- int linger2;
unsigned long last_synq_overflow;
- u32 tso_deferred;
-
/* Receiver side RTT estimation */
struct {
u32 rtt;
@@ -405,6 +409,8 @@ struct tcp_sock {
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info *md5sig_info;
#endif
+
+ int linger2;
};
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
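
As a check on the TCP_NUM_SACKS bound introduced above: TCP options are limited to 60 - 20 = 40 bytes, so four SACK blocks need 4 + 4 * 8 = 36 bytes and just fit; when the 12-byte (aligned) timestamp option is also in use, only 40 - 12 = 28 bytes remain, which is exactly 4 + 3 * 8, i.e. three blocks.
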
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 6f371f24160..d9a85d61638 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -10,10 +10,8 @@
struct ts_config;
-/**
- * TS_AUTOLOAD - Automatically load textsearch modules when needed
- */
-#define TS_AUTOLOAD 1
+#define TS_AUTOLOAD 1 /* Automatically load textsearch modules when needed */
+#define TS_IGNORECASE 2 /* Searches string case insensitively */
/**
* struct ts_state - search state
@@ -39,7 +37,7 @@ struct ts_state
struct ts_ops
{
const char *name;
- struct ts_config * (*init)(const void *, unsigned int, gfp_t);
+ struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
unsigned int (*find)(struct ts_config *,
struct ts_state *);
void (*destroy)(struct ts_config *);
@@ -52,12 +50,14 @@ struct ts_ops
/**
* struct ts_config - search configuration
* @ops: operations of chosen algorithm
+ * @flags: flags
* @get_next_block: callback to fetch the next block to search in
* @finish: callback to finalize a search
*/
struct ts_config
{
struct ts_ops *ops;
+ int flags;
/**
* get_next_block - fetch next block of data
@@ -162,11 +162,10 @@ static inline struct ts_config *alloc_ts_config(size_t payload,
{
struct ts_config *conf;
- conf = kmalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
+ conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
if (conf == NULL)
return ERR_PTR(-ENOMEM);
- memset(conf, 0, TS_PRIV_ALIGN(sizeof(*conf)) + payload);
return conf;
}
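
TS_IGNORECASE reaches the algorithm through the flags argument that textsearch_prepare() already takes for TS_AUTOLOAD; whether it is honoured depends on the chosen algorithm. A hedged caller sketch (the pattern, algorithm choice and wrapper function are illustrative):

static bool foo_contains_token(const void *data, unsigned int len)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("bm", "token", 5, GFP_KERNEL,
                                  TS_AUTOLOAD | TS_IGNORECASE);
        if (IS_ERR(conf))
                return false;

        pos = textsearch_find_continuous(conf, &state, data, len);
        textsearch_destroy(conf);

        return pos != UINT_MAX;     /* UINT_MAX means "not found" */
}
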
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index b0c916d1f37..2bc6fa4adeb 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -2,7 +2,7 @@
* include/linux/tipc_config.h: Include file for TIPC configuration interface
*
* Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2007, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -136,6 +136,14 @@
#define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */
/*
+ * Reserved commands:
+ * May not be issued by any process.
+ * Used internally by TIPC.
+ */
+
+#define TIPC_CMD_NOT_NET_ADMIN 0xC001 /* tx none, rx none */
+
+/*
* TLV types defined for TIPC
*/
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 24f3d2282e1..2158fc0d5a5 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -179,4 +179,17 @@ void arch_update_cpu_topology(void);
#endif
#endif /* CONFIG_NUMA */
+#ifndef topology_physical_package_id
+#define topology_physical_package_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_core_id
+#define topology_core_id(cpu) ((void)(cpu), 0)
+#endif
+#ifndef topology_thread_siblings
+#define topology_thread_siblings(cpu) cpumask_of_cpu(cpu)
+#endif
+#ifndef topology_core_siblings
+#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
+#endif
+
#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 324a3b231d4..4e5833073aa 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -71,7 +71,8 @@ struct tty_bufhead {
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */
struct tty_buffer *free; /* Free queue head */
- int memory_used; /* Buffer space used excluding free queue */
+ int memory_used; /* Buffer space used excluding
+ free queue */
};
/*
* When a break, frame error, or parity error happens, these codes are
@@ -101,71 +102,96 @@ struct tty_bufhead {
#define LNEXT_CHAR(tty) ((tty)->termios->c_cc[VLNEXT])
#define EOL2_CHAR(tty) ((tty)->termios->c_cc[VEOL2])
-#define _I_FLAG(tty,f) ((tty)->termios->c_iflag & (f))
-#define _O_FLAG(tty,f) ((tty)->termios->c_oflag & (f))
-#define _C_FLAG(tty,f) ((tty)->termios->c_cflag & (f))
-#define _L_FLAG(tty,f) ((tty)->termios->c_lflag & (f))
-
-#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK)
-#define I_BRKINT(tty) _I_FLAG((tty),BRKINT)
-#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR)
-#define I_PARMRK(tty) _I_FLAG((tty),PARMRK)
-#define I_INPCK(tty) _I_FLAG((tty),INPCK)
-#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP)
-#define I_INLCR(tty) _I_FLAG((tty),INLCR)
-#define I_IGNCR(tty) _I_FLAG((tty),IGNCR)
-#define I_ICRNL(tty) _I_FLAG((tty),ICRNL)
-#define I_IUCLC(tty) _I_FLAG((tty),IUCLC)
-#define I_IXON(tty) _I_FLAG((tty),IXON)
-#define I_IXANY(tty) _I_FLAG((tty),IXANY)
-#define I_IXOFF(tty) _I_FLAG((tty),IXOFF)
-#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL)
-#define I_IUTF8(tty) _I_FLAG((tty),IUTF8)
-
-#define O_OPOST(tty) _O_FLAG((tty),OPOST)
-#define O_OLCUC(tty) _O_FLAG((tty),OLCUC)
-#define O_ONLCR(tty) _O_FLAG((tty),ONLCR)
-#define O_OCRNL(tty) _O_FLAG((tty),OCRNL)
-#define O_ONOCR(tty) _O_FLAG((tty),ONOCR)
-#define O_ONLRET(tty) _O_FLAG((tty),ONLRET)
-#define O_OFILL(tty) _O_FLAG((tty),OFILL)
-#define O_OFDEL(tty) _O_FLAG((tty),OFDEL)
-#define O_NLDLY(tty) _O_FLAG((tty),NLDLY)
-#define O_CRDLY(tty) _O_FLAG((tty),CRDLY)
-#define O_TABDLY(tty) _O_FLAG((tty),TABDLY)
-#define O_BSDLY(tty) _O_FLAG((tty),BSDLY)
-#define O_VTDLY(tty) _O_FLAG((tty),VTDLY)
-#define O_FFDLY(tty) _O_FLAG((tty),FFDLY)
-
-#define C_BAUD(tty) _C_FLAG((tty),CBAUD)
-#define C_CSIZE(tty) _C_FLAG((tty),CSIZE)
-#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB)
-#define C_CREAD(tty) _C_FLAG((tty),CREAD)
-#define C_PARENB(tty) _C_FLAG((tty),PARENB)
-#define C_PARODD(tty) _C_FLAG((tty),PARODD)
-#define C_HUPCL(tty) _C_FLAG((tty),HUPCL)
-#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL)
-#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD)
-#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS)
-
-#define L_ISIG(tty) _L_FLAG((tty),ISIG)
-#define L_ICANON(tty) _L_FLAG((tty),ICANON)
-#define L_XCASE(tty) _L_FLAG((tty),XCASE)
-#define L_ECHO(tty) _L_FLAG((tty),ECHO)
-#define L_ECHOE(tty) _L_FLAG((tty),ECHOE)
-#define L_ECHOK(tty) _L_FLAG((tty),ECHOK)
-#define L_ECHONL(tty) _L_FLAG((tty),ECHONL)
-#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH)
-#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP)
-#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL)
-#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT)
-#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE)
-#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO)
-#define L_PENDIN(tty) _L_FLAG((tty),PENDIN)
-#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN)
+#define _I_FLAG(tty, f) ((tty)->termios->c_iflag & (f))
+#define _O_FLAG(tty, f) ((tty)->termios->c_oflag & (f))
+#define _C_FLAG(tty, f) ((tty)->termios->c_cflag & (f))
+#define _L_FLAG(tty, f) ((tty)->termios->c_lflag & (f))
+
+#define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK)
+#define I_BRKINT(tty) _I_FLAG((tty), BRKINT)
+#define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR)
+#define I_PARMRK(tty) _I_FLAG((tty), PARMRK)
+#define I_INPCK(tty) _I_FLAG((tty), INPCK)
+#define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP)
+#define I_INLCR(tty) _I_FLAG((tty), INLCR)
+#define I_IGNCR(tty) _I_FLAG((tty), IGNCR)
+#define I_ICRNL(tty) _I_FLAG((tty), ICRNL)
+#define I_IUCLC(tty) _I_FLAG((tty), IUCLC)
+#define I_IXON(tty) _I_FLAG((tty), IXON)
+#define I_IXANY(tty) _I_FLAG((tty), IXANY)
+#define I_IXOFF(tty) _I_FLAG((tty), IXOFF)
+#define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL)
+#define I_IUTF8(tty) _I_FLAG((tty), IUTF8)
+
+#define O_OPOST(tty) _O_FLAG((tty), OPOST)
+#define O_OLCUC(tty) _O_FLAG((tty), OLCUC)
+#define O_ONLCR(tty) _O_FLAG((tty), ONLCR)
+#define O_OCRNL(tty) _O_FLAG((tty), OCRNL)
+#define O_ONOCR(tty) _O_FLAG((tty), ONOCR)
+#define O_ONLRET(tty) _O_FLAG((tty), ONLRET)
+#define O_OFILL(tty) _O_FLAG((tty), OFILL)
+#define O_OFDEL(tty) _O_FLAG((tty), OFDEL)
+#define O_NLDLY(tty) _O_FLAG((tty), NLDLY)
+#define O_CRDLY(tty) _O_FLAG((tty), CRDLY)
+#define O_TABDLY(tty) _O_FLAG((tty), TABDLY)
+#define O_BSDLY(tty) _O_FLAG((tty), BSDLY)
+#define O_VTDLY(tty) _O_FLAG((tty), VTDLY)
+#define O_FFDLY(tty) _O_FLAG((tty), FFDLY)
+
+#define C_BAUD(tty) _C_FLAG((tty), CBAUD)
+#define C_CSIZE(tty) _C_FLAG((tty), CSIZE)
+#define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB)
+#define C_CREAD(tty) _C_FLAG((tty), CREAD)
+#define C_PARENB(tty) _C_FLAG((tty), PARENB)
+#define C_PARODD(tty) _C_FLAG((tty), PARODD)
+#define C_HUPCL(tty) _C_FLAG((tty), HUPCL)
+#define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL)
+#define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD)
+#define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS)
+
+#define L_ISIG(tty) _L_FLAG((tty), ISIG)
+#define L_ICANON(tty) _L_FLAG((tty), ICANON)
+#define L_XCASE(tty) _L_FLAG((tty), XCASE)
+#define L_ECHO(tty) _L_FLAG((tty), ECHO)
+#define L_ECHOE(tty) _L_FLAG((tty), ECHOE)
+#define L_ECHOK(tty) _L_FLAG((tty), ECHOK)
+#define L_ECHONL(tty) _L_FLAG((tty), ECHONL)
+#define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH)
+#define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP)
+#define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL)
+#define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT)
+#define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE)
+#define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO)
+#define L_PENDIN(tty) _L_FLAG((tty), PENDIN)
+#define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN)
struct device;
struct signal_struct;
+
+/*
+ * Port level information. Each device keeps its own port level information
+ * so provide a common structure for those ports wanting to use common support
+ * routines.
+ *
+ * The tty port has a different lifetime to the tty, so it must be kept apart.
+ * In addition, be careful: tty -> port mappings are valid for the life
+ * of the tty object, but in many cases port -> tty mappings are valid only
+ * until a hangup, so don't use the wrong path.
+ */
+
+struct tty_port {
+ struct tty_struct *tty; /* Back pointer */
+ int blocked_open; /* Waiting to open */
+ int count; /* Usage count */
+ wait_queue_head_t open_wait; /* Open waiters */
+ wait_queue_head_t close_wait; /* Close waiters */
+ unsigned long flags; /* TTY flags ASY_*/
+ struct mutex mutex; /* Locking */
+ unsigned char *xmit_buf; /* Optional buffer */
+ int close_delay; /* Close port delay */
+ int closing_wait; /* Delay for output */
+};
+
/*
* Where all of the state associated with a tty is kept while the tty
* is open. Since the termios state should be kept even if the tty
@@ -185,6 +211,7 @@ struct tty_struct {
struct tty_driver *driver;
const struct tty_operations *ops;
int index;
+ /* The ldisc objects are protected by tty_ldisc_lock at the moment */
struct tty_ldisc ldisc;
struct mutex termios_mutex;
spinlock_t ctrl_lock;
@@ -213,7 +240,7 @@ struct tty_struct {
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
-
+
/*
* The following is data for the N_TTY line discipline. For
* historical reasons, this is included in the tty structure.
@@ -241,6 +268,7 @@ struct tty_struct {
spinlock_t read_lock;
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
+ struct tty_port *port;
};
/* tty magic number */
@@ -248,14 +276,14 @@ struct tty_struct {
/*
* These bits are used in the flags field of the tty structure.
- *
+ *
* So that interrupts won't be able to mess up the queues,
* copy_to_cooked must be atomic with respect to itself, as must
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
*/
#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */
-#define TTY_IO_ERROR 1 /* Canse an I/O error (may be no ldisc too) */
+#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
@@ -285,11 +313,11 @@ extern int vcs_init(void);
extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
const char *routine);
extern char *tty_name(struct tty_struct *tty, char *buf);
-extern void tty_wait_until_sent(struct tty_struct * tty, long timeout);
-extern int tty_check_change(struct tty_struct * tty);
-extern void stop_tty(struct tty_struct * tty);
-extern void start_tty(struct tty_struct * tty);
-extern int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc);
+extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+extern int tty_check_change(struct tty_struct *tty);
+extern void stop_tty(struct tty_struct *tty);
+extern void start_tty(struct tty_struct *tty);
+extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
extern int tty_unregister_ldisc(int disc);
extern int tty_register_driver(struct tty_driver *driver);
extern int tty_unregister_driver(struct tty_driver *driver);
@@ -310,10 +338,10 @@ extern int is_current_pgrp_orphaned(void);
extern struct pid *tty_get_pgrp(struct tty_struct *tty);
extern int is_ignored(int sig);
extern int tty_signal(int sig, struct tty_struct *tty);
-extern void tty_hangup(struct tty_struct * tty);
-extern void tty_vhangup(struct tty_struct * tty);
+extern void tty_hangup(struct tty_struct *tty);
+extern void tty_vhangup(struct tty_struct *tty);
extern void tty_unhangup(struct file *filp);
-extern int tty_hung_up_p(struct file * filp);
+extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void disassociate_ctty(int priv);
@@ -322,17 +350,17 @@ extern void tty_flip_buffer_push(struct tty_struct *tty);
extern speed_t tty_get_baud_rate(struct tty_struct *tty);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
-extern void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud);
-extern void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud);
+extern void tty_termios_encode_baud_rate(struct ktermios *termios,
+ speed_t ibaud, speed_t obaud);
+extern void tty_encode_baud_rate(struct tty_struct *tty,
+ speed_t ibaud, speed_t obaud);
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
-
-extern struct tty_ldisc *tty_ldisc_get(int);
-extern void tty_ldisc_put(int);
+extern const struct file_operations tty_ldiscs_proc_fops;
extern void tty_wakeup(struct tty_struct *tty);
extern void tty_ldisc_flush(struct tty_struct *tty);
@@ -351,10 +379,14 @@ extern void tty_write_unlock(struct tty_struct *tty);
extern int tty_write_lock(struct tty_struct *tty, int ndelay);
#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
+extern void tty_port_init(struct tty_port *port);
+extern int tty_port_alloc_xmit_buf(struct tty_port *port);
+extern void tty_port_free_xmit_buf(struct tty_port *port);
+
/* n_tty.c */
-extern struct tty_ldisc tty_ldisc_N_TTY;
+extern struct tty_ldisc_ops tty_ldisc_N_TTY;
/* tty_audit.c */
#ifdef CONFIG_AUDIT
@@ -363,7 +395,8 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
extern void tty_audit_push(struct tty_struct *tty);
-extern void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid);
+extern void tty_audit_push_task(struct task_struct *tsk,
+ uid_t loginuid, u32 sessionid);
#else
static inline void tty_audit_add_data(struct tty_struct *tty,
unsigned char *data, size_t size)
@@ -378,19 +411,20 @@ static inline void tty_audit_fork(struct signal_struct *sig)
static inline void tty_audit_push(struct tty_struct *tty)
{
}
-static inline void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
+static inline void tty_audit_push_task(struct task_struct *tsk,
+ uid_t loginuid, u32 sessionid)
{
}
#endif
/* tty_ioctl.c */
-extern int n_tty_ioctl(struct tty_struct * tty, struct file * file,
+extern int n_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
/* serial.c */
extern void serial_console_init(void);
-
+
/* pcxx.c */
extern int pcxe_open(struct tty_struct *tty, struct file *filp);
@@ -401,7 +435,7 @@ extern void console_print(const char *);
/* vt.c */
-extern int vt_ioctl(struct tty_struct *tty, struct file * file,
+extern int vt_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
#endif /* __KERNEL__ */
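
struct tty_port collects the open/close bookkeeping (waiters, counts, optional transmit buffer) that serial-type drivers used to duplicate per device. A hedged sketch of a driver embedding it (the driver structure is hypothetical):

struct foo_serial_port {
        struct tty_port port;      /* generic open/close state */
        void __iomem    *regs;     /* device-specific state */
};

static int foo_port_setup(struct foo_serial_port *fp)
{
        tty_port_init(&fp->port);
        return tty_port_alloc_xmit_buf(&fp->port);  /* optional xmit_buf */
}

static void foo_port_teardown(struct foo_serial_port *fp)
{
        tty_port_free_xmit_buf(&fp->port);
}
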
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 6226504d910..40f38d89677 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -104,7 +104,7 @@
#include <linux/fs.h>
#include <linux/wait.h>
-struct tty_ldisc {
+struct tty_ldisc_ops {
int magic;
char *name;
int num;
@@ -142,6 +142,11 @@ struct tty_ldisc {
int refcount;
};
+struct tty_ldisc {
+ struct tty_ldisc_ops *ops;
+ int refcount;
+};
+
#define TTY_LDISC_MAGIC 0x5403
#define LDISC_FLAG_DEFINED 0x00000001
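
Line discipline drivers now describe themselves with a struct tty_ldisc_ops and register that; the per-tty struct tty_ldisc merely binds an ops table to a reference count. A hedged registration sketch (the callbacks are stubs and the ldisc slot number is only an example):

static int foo_ldisc_open(struct tty_struct *tty)
{
        return 0;
}

static void foo_ldisc_close(struct tty_struct *tty)
{
}

static struct tty_ldisc_ops foo_ldisc_ops = {
        .magic = TTY_LDISC_MAGIC,
        .name  = "n_foo",
        .open  = foo_ldisc_open,
        .close = foo_ldisc_close,
};

static int __init foo_ldisc_init(void)
{
        return tty_register_ldisc(N_MOUSE /* example slot */, &foo_ldisc_ops);
}
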
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 581ca2c14c5..0cf5c4c0ec8 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -38,6 +38,7 @@ struct udphdr {
#ifdef __KERNEL__
#include <net/inet_sock.h>
#include <linux/skbuff.h>
+#include <net/netns/hash.h>
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
@@ -46,6 +47,11 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
#define UDP_HTABLE_SIZE 128
+static inline int udp_hashfn(struct net *net, const unsigned num)
+{
+ return (num + net_hash_mix(net)) & (UDP_HTABLE_SIZE - 1);
+}
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
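
udp_hashfn() folds a per-namespace value (net_hash_mix()) into the port number, so sockets bound to the same port in different network namespaces tend to land in different buckets of the 128-entry table. A hedged lookup sketch (the table pointer is passed in rather than naming the core's hash table):

static struct hlist_head *foo_udp_bucket(struct net *net,
                                         struct hlist_head *table,
                                         unsigned short port)
{
        /* the same (net, port) pair always maps to the same bucket;
         * the index is in [0, UDP_HTABLE_SIZE - 1] */
        return &table[udp_hashfn(net, port)];
}
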
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 29d6458ecb8..0a6e6d4b929 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -260,7 +260,8 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
extern void rndis_status(struct usbnet *dev, struct urb *urb);
-extern int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf);
+extern int
+rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen);
extern int
generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags);
extern void rndis_unbind(struct usbnet *dev, struct usb_interface *intf);
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 4a535ea1e12..2e66a95e8d3 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -246,6 +246,7 @@ struct v4l2_capability
#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */
#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */
#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */
+#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */
#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */
#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */
@@ -309,6 +310,7 @@ struct v4l2_pix_format
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B','A','8','1') /* 8 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G','B','R','G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B','Y','R','2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
@@ -323,6 +325,9 @@ struct v4l2_pix_format
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P','W','C','1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P','W','C','2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E','6','2','5') /* ET61X251 compression */
+#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S','5','0','1') /* YUYV per line */
+#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S','5','6','1') /* compressed GBRG bayer */
+#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P','2','0','7') /* compressed BGGR bayer */
/*
* F O R M A T E N U M E R A T I O N
@@ -1156,6 +1161,14 @@ struct v4l2_frequency
__u32 reserved[8];
};
+struct v4l2_hw_freq_seek {
+ __u32 tuner;
+ enum v4l2_tuner_type type;
+ __u32 seek_upward;
+ __u32 wrap_around;
+ __u32 reserved[8];
+};
+
/*
* A U D I O
*/
@@ -1441,6 +1454,7 @@ struct v4l2_chip_ident {
#define VIDIOC_G_CHIP_IDENT _IOWR ('V', 81, struct v4l2_chip_ident)
#endif
+#define VIDIOC_S_HW_FREQ_SEEK _IOW ('V', 82, struct v4l2_hw_freq_seek)
#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */
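
VIDIOC_S_HW_FREQ_SEEK is write-only: userspace fills struct v4l2_hw_freq_seek and the driver seeks from the current frequency in the requested direction. A hedged user-space sketch for a radio device (error handling and device open omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int radio_seek_up(int fd)
{
        struct v4l2_hw_freq_seek seek;

        memset(&seek, 0, sizeof(seek));
        seek.tuner = 0;                 /* first tuner */
        seek.type = V4L2_TUNER_RADIO;
        seek.seek_upward = 1;           /* seek towards higher frequencies */
        seek.wrap_around = 1;           /* wrap at the end of the band */

        return ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek);
}
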
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index 3add87465b1..e0aa39612eb 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -522,7 +522,7 @@ extern int wanrouter_proc_init(void);
extern void wanrouter_proc_cleanup(void);
extern int wanrouter_proc_add(struct wan_device *wandev);
extern int wanrouter_proc_delete(struct wan_device *wandev);
-extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
+extern long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* Public Data */
/* list of registered devices */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 0a9b5b41ed6..d7958f9b52c 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -611,6 +611,7 @@
#define IW_ENCODE_ALG_WEP 1
#define IW_ENCODE_ALG_TKIP 2
#define IW_ENCODE_ALG_CCMP 3
+#define IW_ENCODE_ALG_PMK 4
/* struct iw_encode_ext ->ext_flags */
#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
@@ -630,6 +631,7 @@
#define IW_ENC_CAPA_WPA2 0x00000002
#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004
#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008
+#define IW_ENC_CAPA_4WAY_HANDSHAKE 0x00000010
/* Event capability macros - in (struct iw_range *)->event_capa
* Because we have more than 32 possible events, we use an array of
@@ -675,6 +677,19 @@ struct iw_point
__u16 flags; /* Optional params */
};
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+
+#include <linux/compat.h>
+
+struct compat_iw_point {
+ compat_caddr_t pointer;
+ __u16 length;
+ __u16 flags;
+};
+#endif
+#endif
+
/*
* A frequency
* For numbers lower than 10^9, we encode the number in 'm' and
@@ -1098,6 +1113,21 @@ struct iw_event
#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
IW_EV_POINT_OFF)
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+struct __compat_iw_event {
+ __u16 len; /* Real length of this stuff */
+ __u16 cmd; /* Wireless IOCTL */
+ compat_caddr_t pointer;
+};
+#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
+#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
+#define IW_EV_COMPAT_POINT_LEN \
+ (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \
+ IW_EV_COMPAT_POINT_OFF)
+#endif
+#endif
+
/* Size of the Event prefix when packed in stream */
#define IW_EV_LCP_PK_LEN (4)
/* Size of the various events when packed in stream */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f462439cc28..12b15c561a1 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -63,6 +63,7 @@ struct writeback_control {
unsigned for_writepages:1; /* This is a writepages() call */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned more_io:1; /* more io to be dispatched */
+ unsigned range_cont:1;
};
/*
@@ -105,6 +106,8 @@ extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
+extern unsigned long determine_dirtyable_memory(void);
+
extern int dirty_ratio_handler(struct ctl_table *table, int write,
struct file *filp, void __user *buffer, size_t *lenp,
loff_t *ppos);
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 2ca6bae8872..fb0c215a305 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -339,6 +339,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_NOPMTUDISC 4
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
+#define XFRM_STATE_AF_UNSPEC 32
};
struct xfrm_usersa_id {