Diffstat (limited to 'include')
31 files changed, 329 insertions, 158 deletions
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index ff8d27af478..a11cc9d3259 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -69,8 +69,8 @@ struct detailed_pixel_timing { u8 hborder; u8 vborder; u8 unknown0:1; - u8 vsync_positive:1; u8 hsync_positive:1; + u8 vsync_positive:1; u8 separate_sync:2; u8 stereo:1; u8 unknown6:1; diff --git a/include/linux/ata.h b/include/linux/ata.h index 08a86d5cdf1..9a061accd8b 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -89,6 +89,8 @@ enum { ATA_ID_DLF = 128, ATA_ID_CSFO = 129, ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), diff --git a/include/linux/bio.h b/include/linux/bio.h index 1b16108a541..d8bd43bfdcf 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -531,7 +531,7 @@ extern void bio_integrity_endio(struct bio *, int); extern void bio_integrity_advance(struct bio *, unsigned int); extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); extern void bio_integrity_split(struct bio *, struct bio_pair *, int); -extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *); extern int bioset_integrity_create(struct bio_set *, int); extern void bioset_integrity_free(struct bio_set *); extern void bio_integrity_init_slab(void); @@ -542,7 +542,7 @@ extern void bio_integrity_init_slab(void); #define bioset_integrity_create(a, b) (0) #define bio_integrity_prep(a) (0) #define bio_integrity_enabled(a) (0) -#define bio_integrity_clone(a, b, c) (0) +#define bio_integrity_clone(a, b, c,d ) (0) #define bioset_integrity_free(a) do { } while (0) #define bio_integrity_free(a, b) do { } while (0) #define bio_integrity_endio(a, b) do { } while (0) diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 95837bfb525..455d83219fa 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size); #define BOOTMEM_DEFAULT 0 #define BOOTMEM_EXCLUSIVE (1<<0) +extern int reserve_bootmem(unsigned long addr, + unsigned long size, + int flags); extern int reserve_bootmem_node(pg_data_t *pgdat, - unsigned long physaddr, - unsigned long size, - int flags); -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE -extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); -#endif + unsigned long physaddr, + unsigned long size, + int flags); -extern void *__alloc_bootmem_nopanic(unsigned long size, +extern void *__alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem(unsigned long size, +extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_low(unsigned long size, - unsigned long align, - unsigned long goal); extern void *__alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, @@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); +extern void *__alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal); extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE + #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define 
alloc_bootmem_nopanic(x) \ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low(x) \ - __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_nopanic(x) \ __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) + +#define alloc_bootmem_low(x) \ + __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_low_pages_node(pgdat, x) \ __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) -#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, int flags); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 1514d534dee..a3ed7cb8ca3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -52,7 +52,15 @@ #define __deprecated __attribute__((deprecated)) #define __packed __attribute__((packed)) #define __weak __attribute__((weak)) -#define __naked __attribute__((naked)) + +/* + * it doesn't make sense on ARM (currently the only user of __naked) to trace + * naked functions because then mcount is called without stack and frame pointer + * being set up and there is no chance to restore the lr register to the value + * before mcount was called. 
+ */ +#define __naked __attribute__((naked)) notrace + #define __noreturn __attribute__((noreturn)) /* diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 384b38d3e8e..161042746af 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -234,7 +234,6 @@ struct cpufreq_driver { int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; - bool hide_interface; }; /* flags */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f0413845f20..1956c8d46d3 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan - * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter * @bytes_transferred: byte counter */ @@ -114,9 +113,6 @@ struct dma_chan_percpu { * @cookie: last cookie value returned to client * @chan_id: channel ID for sysfs * @dev: class device for sysfs - * @refcount: kref, used in "bigref" slow-mode - * @slow_ref: indicates that the DMA channel is free - * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client-count: how many clients are using this channel @@ -213,8 +209,6 @@ struct dma_async_tx_descriptor { * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability - * @refcount: reference count - * @done: IO completion struct * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -227,6 +221,7 @@ struct dma_async_tx_descriptor { * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation * @device_terminate_all: terminate all pending operations + * @device_is_tx_complete: poll for transaction completion * @device_issue_pending: push pending transactions to hardware */ struct dma_device { diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f28440784cf..2f342746895 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -24,10 +24,10 @@ #include <linux/acpi.h> #include <linux/types.h> #include <linux/msi.h> +#include <linux/irqreturn.h> -#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) struct intel_iommu; - +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) struct dmar_drhd_unit { struct list_head list; /* list of drhd units */ struct acpi_dmar_header *hdr; /* ACPI header */ @@ -49,7 +49,7 @@ extern int dmar_dev_scope_init(void); /* Intel IOMMU detection */ extern void detect_intel_iommu(void); - +extern int enable_drhd_fault_handling(void); extern int parse_ioapics_under_ir(void); extern int alloc_iommu(struct dmar_drhd_unit *); @@ -63,12 +63,12 @@ static inline int dmar_table_init(void) { return -ENODEV; } +static inline int enable_drhd_fault_handling(void) +{ + return -1; +} #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ -#ifdef CONFIG_INTR_REMAP -extern int intr_remapping_enabled; -extern int enable_intr_remapping(int); - struct irte { union { struct { @@ -97,6 +97,10 @@ struct irte { __u64 high; }; }; +#ifdef CONFIG_INTR_REMAP +extern int intr_remapping_enabled; +extern int enable_intr_remapping(int); + extern int get_irte(int irq, struct irte *entry); extern 
int modify_irte(int irq, struct irte *irte_modified); extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); @@ -111,14 +115,40 @@ extern int irq_remapped(int irq); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_ioapic_to_ir(int apic); #else +static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + return -1; +} +static inline int modify_irte(int irq, struct irte *irte_modified) +{ + return -1; +} +static inline int free_irte(int irq) +{ + return -1; +} +static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + return -1; +} +static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, + u16 sub_handle) +{ + return -1; +} +static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + return NULL; +} +static inline struct intel_iommu *map_ioapic_to_ir(int apic) +{ + return NULL; +} #define irq_remapped(irq) (0) #define enable_intr_remapping(mode) (-1) #define intr_remapping_enabled (0) #endif -#ifdef CONFIG_DMAR -extern const char *dmar_get_fault_reason(u8 fault_reason); - /* Can't use the common MSI interrupt functions * since DMAR is not a pci device */ @@ -127,8 +157,10 @@ extern void dmar_msi_mask(unsigned int irq); extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); +extern irqreturn_t dmar_fault(int irq, void *dev_id); extern int arch_setup_dmar_msi(unsigned int irq); +#ifdef CONFIG_DMAR extern int iommu_detected, no_iommu; extern struct list_head dmar_rmrr_units; struct dmar_rmrr_unit { diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index c37e9241fae..ed21bd3dbd2 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h @@ -511,7 +511,6 @@ struct hd_driveid { unsigned short words69_70[2]; /* reserved words 69-70 * future command overlap and queuing */ - /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */ unsigned short words71_74[4]; /* reserved words 71-74 * for IDENTIFY PACKET DEVICE command */ diff --git a/include/linux/ide.h b/include/linux/ide.h index fe235b65207..25087aead65 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -797,6 +797,7 @@ typedef struct hwif_s { struct scatterlist *sg_table; int sg_max_nents; /* Maximum number of entries in it */ int sg_nents; /* Current number of entries in it */ + int orig_sg_nents; int sg_dma_direction; /* dma transfer direction */ /* data phase of the active command (currently only valid for PIO/DMA) */ @@ -866,6 +867,7 @@ struct ide_host { unsigned int n_ports; struct device *dev[2]; unsigned int (*init_chipset)(struct pci_dev *); + irq_handler_t irq_handler; unsigned long host_flags; void *host_priv; ide_hwif_t *cur_port; /* for hosts requiring serialization */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d2e3cbfba14..78c1262e870 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -292,6 +292,8 @@ struct intel_iommu { spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ + unsigned int irq; + unsigned char name[13]; /* Device Name */ #ifdef CONFIG_DMAR unsigned long *domain_ids; /* bitmap of domains */ @@ -299,8 +301,6 @@ struct intel_iommu { spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ - unsigned int irq; - unsigned char name[7]; /* Device Name */ 
struct iommu_flush flush; #endif struct q_inval *qi; /* Queued invalidation info */ @@ -321,6 +321,7 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); extern int alloc_iommu(struct dmar_drhd_unit *drhd); extern void free_iommu(struct intel_iommu *iommu); extern int dmar_enable_qi(struct intel_iommu *iommu); +extern void dmar_disable_qi(struct intel_iommu *iommu); extern void qi_global_iec(struct intel_iommu *iommu); extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, diff --git a/include/linux/libata.h b/include/linux/libata.h index 5d87bc09a1f..dc18b87ed72 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -275,7 +275,7 @@ enum { * advised to wait only for the following duration before * doing SRST. */ - ATA_TMOUT_PMP_SRST_WAIT = 1000, + ATA_TMOUT_PMP_SRST_WAIT = 5000, /* ATA bus states */ BUS_UNKNOWN = 0, @@ -530,6 +530,7 @@ struct ata_queued_cmd { unsigned long flags; /* ATA_QCFLAG_xxx */ unsigned int tag; unsigned int n_elem; + unsigned int orig_n_elem; int dma_dir; @@ -750,7 +751,8 @@ struct ata_port { acpi_handle acpi_handle; struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ #endif - u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ + /* owned by EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; /* The following initializer overrides a method to NULL whether one of diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index aa6fe7026de..51855dfd8ad 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -346,6 +346,7 @@ static inline int __nlm_cmp_addr4(const struct sockaddr *sap1, return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -353,6 +354,13 @@ static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); } +#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ +static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return 0; +} +#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ /* * Compare two host addresses diff --git a/include/linux/mm.h b/include/linux/mm.h index 065cdf8c09f..b1ea37fc7a2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ +#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS @@ -145,7 +146,7 @@ extern pgprot_t protection_map[16]; */ static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) { - return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff); + return (vma->vm_flags & VM_PFN_AT_MMAP); } static inline int is_pfn_mapping(struct vm_area_struct *vma) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 92915e81443..d84feb7bdbf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -276,4 +276,7 @@ struct mm_struct { #endif }; +/* Future-safe accessor for struct mm_struct's cpu_vm_mask. 
*/ +#define mm_cpumask(mm) (&(mm)->cpu_vm_mask) + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ec54785d34f..659366734f3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1079,6 +1079,7 @@ extern void synchronize_net(void); extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); extern int init_dummy_netdev(struct net_device *dev); +extern void netdev_resync_ops(struct net_device *dev); extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern struct net_device *dev_get_by_index(struct net *net, int ifindex); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index a550b528319..2e5f00066af 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -406,6 +406,8 @@ struct nfs3_setaclargs { int mask; struct posix_acl * acl_access; struct posix_acl * acl_default; + size_t len; + unsigned int npages; struct page ** pages; }; diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 54487a99beb..43011b69297 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -37,6 +37,9 @@ #define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ >> PAGE_SHIFT) +#define NFS_ACL_MAX_ENTRIES_INLINE (5) +#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2) + static inline unsigned int nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) { diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 3577ffd90d4..ee5615d6521 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -5,6 +5,7 @@ #include <linux/slab.h> /* For kmalloc() */ #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/pfn.h> #include <asm/percpu.h> @@ -52,17 +53,18 @@ #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) -/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ -#ifndef PERCPU_ENOUGH_ROOM +/* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES -#define PERCPU_MODULE_RESERVE 8192 +#define PERCPU_MODULE_RESERVE (8 << 10) #else -#define PERCPU_MODULE_RESERVE 0 +#define PERCPU_MODULE_RESERVE 0 #endif +#ifndef PERCPU_ENOUGH_ROOM #define PERCPU_ENOUGH_ROOM \ - (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE) -#endif /* PERCPU_ENOUGH_ROOM */ + (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ + PERCPU_MODULE_RESERVE) +#endif /* * Must be an lvalue. Since @var must be a simple identifier, @@ -76,52 +78,94 @@ #ifdef CONFIG_SMP +#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA + +/* minimum unit size, also is the maximum supported allocation size */ +#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) + +/* + * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy + * back on the first chunk for dynamic percpu allocation if arch is + * manually allocating and mapping it for faster access (as a part of + * large page mapping for example). + * + * The following values give between one and two pages of free space + * after typical minimal boot (2-way SMP, single disk and NIC) with + * both defconfig and a distro config on x86_64 and 32. More + * intelligent way to determine this would be nice. 
+ */ +#if BITS_PER_LONG > 32 +#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#else +#define PERCPU_DYNAMIC_RESERVE (12 << 10) +#endif + +extern void *pcpu_base_addr; + +typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); +typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); + +extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, + size_t static_size, size_t reserved_size, + ssize_t dyn_size, ssize_t unit_size, + void *base_addr, + pcpu_populate_pte_fn_t populate_pte_fn); + +extern ssize_t __init pcpu_embed_first_chunk( + size_t static_size, size_t reserved_size, + ssize_t dyn_size, ssize_t unit_size); + +/* + * Use this to get to a cpu's version of the per-cpu object + * dynamically allocated. Non-atomic access to the current CPU's + * version should probably be combined with get_cpu()/put_cpu(). + */ +#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) + +extern void *__alloc_reserved_percpu(size_t size, size_t align); + +#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ + struct percpu_data { void *ptrs[1]; }; #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) -/* - * Use this to get to a cpu's version of the per-cpu object dynamically - * allocated. Non-atomic access to the current CPU's version should - * probably be combined with get_cpu()/put_cpu(). - */ -#define percpu_ptr(ptr, cpu) \ -({ \ - struct percpu_data *__p = __percpu_disguise(ptr); \ - (__typeof__(ptr))__p->ptrs[(cpu)]; \ + +#define per_cpu_ptr(ptr, cpu) \ +({ \ + struct percpu_data *__p = __percpu_disguise(ptr); \ + (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); -extern void percpu_free(void *__pdata); +#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ + +extern void *__alloc_percpu(size_t size, size_t align); +extern void free_percpu(void *__pdata); #else /* CONFIG_SMP */ -#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) -static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) +static inline void *__alloc_percpu(size_t size, size_t align) { - return kzalloc(size, gfp); + /* + * Can't easily make larger alignment work with kmalloc. WARN + * on it. Larger alignment should only be used for module + * percpu sections on SMP for which this path isn't used. 
+ */ + WARN_ON_ONCE(align > SMP_CACHE_BYTES); + return kzalloc(size, GFP_KERNEL); } -static inline void percpu_free(void *__pdata) +static inline void free_percpu(void *p) { - kfree(__pdata); + kfree(p); } #endif /* CONFIG_SMP */ -#define percpu_alloc_mask(size, gfp, mask) \ - __percpu_alloc_mask((size), (gfp), &(mask)) - -#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) - -/* (legacy) interface for use without CPU hotplug handling */ - -#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ - cpu_possible_map) -#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) -#define free_percpu(ptr) percpu_free((ptr)) -#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) +#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ + __alignof__(type)) #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index f3f697df1d7..80044a4f3ab 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h @@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void); #define rcu_enter_nohz() do { } while (0) #define rcu_exit_nohz() do { } while (0) +/* A context switch is a grace period for rcuclassic. */ +static inline int rcu_blocking_is_gp(void) +{ + return num_online_cpus() == 1; +} + #endif /* __LINUX_RCUCLASSIC_H */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 921340a7b71..528343e6da5 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -52,6 +52,9 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; +/* Internal to kernel, but needed by rcupreempt.h. */ +extern int rcu_scheduler_active; + #if defined(CONFIG_CLASSIC_RCU) #include <linux/rcuclassic.h> #elif defined(CONFIG_TREE_RCU) @@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void); /* Internal to kernel */ extern void rcu_init(void); +extern void rcu_scheduler_starting(void); extern int rcu_needs_cpu(int cpu); #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 3e05c09b54a..74304b4538d 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h @@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void) #define rcu_exit_nohz() do { } while (0) #endif /* CONFIG_NO_HZ */ +/* + * A context switch is a grace period for rcupreempt synchronize_rcu() + * only during early boot, before the scheduler has been initialized. + * So, how the heck do we get a context switch? Well, if the caller + * invokes synchronize_rcu(), they are willing to accept a context + * switch, so we simply pretend that one happened. + * + * After boot, there might be a blocked or preempted task in an RCU + * read-side critical section, so we cannot then take the fastpath. + */ +static inline int rcu_blocking_is_gp(void) +{ + return num_online_cpus() == 1 && !rcu_scheduler_active; +} + #endif /* __LINUX_RCUPREEMPT_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d4368b7975c..a722fb67bb2 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void) } #endif /* CONFIG_NO_HZ */ +/* A context switch is a grace period for rcutree. 
*/ +static inline int rcu_blocking_is_gp(void) +{ + return num_online_cpus() == 1; +} + #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index f0a50b20e8a..46d680643f8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1418,6 +1418,9 @@ struct task_struct { #endif }; +/* Future-safe accessor for struct task_struct's cpus_allowed. */ +#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) + /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH @@ -2303,9 +2306,13 @@ extern long sched_group_rt_runtime(struct task_group *tg); extern int sched_group_set_rt_period(struct task_group *tg, long rt_period_us); extern long sched_group_rt_period(struct task_group *tg); +extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); #endif #endif +extern int task_can_switch_user(struct user_struct *up, + struct task_struct *tsk); + #ifdef CONFIG_TASK_XACCT static inline void add_rchar(struct task_struct *tsk, ssize_t amt) { diff --git a/include/linux/serio.h b/include/linux/serio.h index 1bcb357a01a..e0417e4d3f1 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio) #define SERIO_FUJITSU 0x35 #define SERIO_ZHENHUA 0x36 #define SERIO_INEXIO 0x37 -#define SERIO_TOUCHIT213 0x37 +#define SERIO_TOUCHIT213 0x38 #define SERIO_W8001 0x39 #endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 9c0890c7a06..a43ebec3a7b 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages); +extern int map_kernel_range_noflush(unsigned long start, unsigned long size, + pgprot_t prot, struct page **pages); +extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); /* Allocate/destroy a 'vmalloc' VM area. */ @@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count); */ extern rwlock_t vmlist_lock; extern struct vm_struct *vmlist; +extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #endif /* _LINUX_VMALLOC_H */ diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 6fc13d905c5..ded434b032a 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -109,11 +109,6 @@ extern struct list_head net_namespace_list; #ifdef CONFIG_NET_NS extern void __put_net(struct net *net); -static inline int net_alive(struct net *net) -{ - return net && atomic_read(&net->count); -} - static inline struct net *get_net(struct net *net) { atomic_inc(&net->count); @@ -145,11 +140,6 @@ int net_eq(const struct net *net1, const struct net *net2) } #else -static inline int net_alive(struct net *net) -{ - return 1; -} - static inline struct net *get_net(struct net *net) { return net; @@ -234,6 +224,23 @@ struct pernet_operations { void (*exit)(struct net *net); }; +/* + * Use these carefully. If you implement a network device and it + * needs per network namespace operations use device pernet operations, + * otherwise use pernet subsys operations. + * + * This is critically important. 
Most of the network code cleanup + * runs with the assumption that dev_remove_pack has been called so no + * new packets will arrive during and after the cleanup functions have + * been called. dev_remove_pack is not per namespace so instead the + * guarantee of no more packets arriving in a network namespace is + * provided by ensuring that all network devices and all sockets have + * left the network namespace before the cleanup methods are called. + * + * For the longest time the ipv4 icmp code was registered as a pernet + * device which caused kernel oops, and panics during network + * namespace cleanup. So please don't get this wrong. + */ extern int register_pernet_subsys(struct pernet_operations *); extern void unregister_pernet_subsys(struct pernet_operations *); extern int register_pernet_gen_subsys(int *id, struct pernet_operations *); diff --git a/include/scsi/fc/fc_fcoe.h b/include/scsi/fc/fc_fcoe.h index 57aaa8f0d61..f271d9cc0fc 100644 --- a/include/scsi/fc/fc_fcoe.h +++ b/include/scsi/fc/fc_fcoe.h @@ -31,10 +31,6 @@ #define ETH_P_FCOE 0x8906 /* FCOE ether type */ #endif -#ifndef ETH_P_8021Q -#define ETH_P_8021Q 0x8100 -#endif - /* * FC_FCOE_OUI hasn't been standardized yet. XXX TBD. */ diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h index 3e4801d2bdb..1b7af3a64c7 100644 --- a/include/scsi/fc/fc_fs.h +++ b/include/scsi/fc/fc_fs.h @@ -337,4 +337,9 @@ enum fc_pf_rjt_reason { FC_RJT_VENDOR = 0xff, /* vendor specific reject */ }; +/* default timeout values */ + +#define FC_DEF_E_D_TOV 2000UL +#define FC_DEF_R_A_TOV 10000UL + #endif /* _FC_FS_H_ */ diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 9f2876397dd..a2e126b86e3 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -68,9 +68,6 @@ /* * FC HBA status */ -#define FC_PAUSE (1 << 1) -#define FC_LINK_UP (1 << 0) - enum fc_lport_state { LPORT_ST_NONE = 0, LPORT_ST_FLOGI, @@ -339,31 +336,17 @@ struct fc_exch { struct libfc_function_template { - /** - * Mandatory Fields - * - * These handlers must be implemented by the LLD. - */ - /* * Interface to send a FC frame - */ - int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp); - - /** - * Optional Fields * - * The LLD may choose to implement any of the following handlers. - * If LLD doesn't specify hander and leaves its pointer NULL then - * the default libfc function will be used for that handler. - */ - - /** - * ELS/CT interfaces + * STATUS: REQUIRED */ + int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp); /* - * elsct_send - sends ELS/CT frame + * Interface to send ELS/CT frames + * + * STATUS: OPTIONAL */ struct fc_seq *(*elsct_send)(struct fc_lport *lport, struct fc_rport *rport, @@ -373,9 +356,6 @@ struct libfc_function_template { struct fc_frame *fp, void *arg), void *arg, u32 timer_msec); - /** - * Exhance Manager interfaces - */ /* * Send the FC frame payload using a new exchange and sequence. @@ -407,6 +387,8 @@ struct libfc_function_template { * timer_msec argument is specified. The timer is canceled when * it fires or when the exchange is done. The exchange timeout handler * is registered by EM layer. + * + * STATUS: OPTIONAL */ struct fc_seq *(*exch_seq_send)(struct fc_lport *lp, struct fc_frame *fp, @@ -418,14 +400,18 @@ struct libfc_function_template { void *arg, unsigned int timer_msec); /* - * send a frame using existing sequence and exchange. + * Send a frame using an existing sequence and exchange. 
+ * + * STATUS: OPTIONAL + */ int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp); /* - * Send ELS response using mainly infomation - * in exchange and sequence in EM layer. + * Send an ELS response using information from a previous + * exchange and sequence. + * + * STATUS: OPTIONAL */ void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd, struct fc_seq_els_data *els_data); @@ -437,6 +423,8 @@ struct libfc_function_template { * A timer_msec can be specified for abort timeout, if non-zero * timer_msec value is specified then exchange resp handler * will be called with timeout error if no response to abort. + * + * STATUS: OPTIONAL */ int (*seq_exch_abort)(const struct fc_seq *req_sp, unsigned int timer_msec); @@ -444,6 +432,8 @@ struct libfc_function_template { /* * Indicate that an exchange/sequence tuple is complete and the memory * allocated for the related objects may be freed. + * + * STATUS: OPTIONAL */ void (*exch_done)(struct fc_seq *sp); @@ -451,6 +441,8 @@ struct libfc_function_template { /* * Assigns an EM and a free XID for a new exchange and then * allocates a new exchange and sequence pair. * The fp can be used to determine free XID. + * + * STATUS: OPTIONAL */ struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp); @@ -458,12 +450,16 @@ struct libfc_function_template { /* * Release previously assigned XID by exch_get API. * The LLD may implement this if XID is assigned by LLD * in exch_get(). + * + * STATUS: OPTIONAL */ void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp, u16 ex_id); /* * Start a new sequence on the same exchange/sequence tuple. + * + * STATUS: OPTIONAL */ struct fc_seq *(*seq_start_next)(struct fc_seq *sp); @@ -471,26 +467,38 @@ struct libfc_function_template { /* * Reset an exchange manager, completing all sequences and exchanges. * If s_id is non-zero, reset only exchanges originating from that FID. * If d_id is non-zero, reset only exchanges sending to that FID. + * + * STATUS: OPTIONAL */ - void (*exch_mgr_reset)(struct fc_exch_mgr *, + void (*exch_mgr_reset)(struct fc_lport *, u32 s_id, u32 d_id); - void (*rport_flush_queue)(void); - /** - * Local Port interfaces + /* + * Flush the rport work queue. Generally used before shutdown. + * + * STATUS: OPTIONAL */ + void (*rport_flush_queue)(void); /* - * Receive a frame to a local port. + * Receive a frame for a local port. + * + * STATUS: OPTIONAL */ void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp); + /* + * Reset the local port. + * + * STATUS: OPTIONAL + */ int (*lport_reset)(struct fc_lport *); - /** - * Remote Port interfaces + /* + * Create a remote port */ + struct fc_rport *(*rport_create)(struct fc_disc_port *); /* * Initiates the RP state machine. It is called from the LP module. @@ -500,26 +508,33 @@ struct libfc_function_template { * - PLOGI * - PRLI * - RTV + * + * STATUS: OPTIONAL */ int (*rport_login)(struct fc_rport *rport); /* * Logoff, and remove the rport from the transport if * it had been added. This will send a LOGO to the target. + * + * STATUS: OPTIONAL */ int (*rport_logoff)(struct fc_rport *rport); /* * Receive a request from a remote port. + * + * STATUS: OPTIONAL */ void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, struct fc_rport *); - struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); - - /** - * FCP interfaces + /* + * Look up an rport by its port ID. 
+ * + * STATUS: OPTIONAL + */ + struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32); /* * Send a fcp cmd from fsp pkt. * * The resp handler is called when FCP_RSP received. * + * STATUS: OPTIONAL */ int (*fcp_cmd_send)(struct fc_lport *lp, struct fc_fcp_pkt *fsp, void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg)); /* - * Used at least durring linkdown and reset + * Clean up the FCP layer, used during link down and reset + * + * STATUS: OPTIONAL */ void (*fcp_cleanup)(struct fc_lport *lp); /* * Abort all I/O on a local port + * + * STATUS: OPTIONAL */ void (*fcp_abort_io)(struct fc_lport *lp); - /** - * Discovery interfaces + /* + * Receive a request for the discovery layer. + * + * STATUS: OPTIONAL */ - void (*disc_recv_req)(struct fc_seq *, struct fc_frame *, struct fc_lport *); /* * Start discovery for a local port. + * + * STATUS: OPTIONAL */ void (*disc_start)(void (*disc_callback)(struct fc_lport *, enum fc_disc_event), @@ -559,6 +582,8 @@ struct libfc_function_template { /* * Stop discovery for a given lport. This will remove * all discovered rports + * + * STATUS: OPTIONAL */ void (*disc_stop) (struct fc_lport *); @@ -566,6 +591,8 @@ struct libfc_function_template { * Stop discovery for a given lport. This will block * until all discovered rports are deleted from the * FC transport class + * + * STATUS: OPTIONAL */ void (*disc_stop_final) (struct fc_lport *); }; @@ -603,7 +630,8 @@ struct fc_lport { /* Operational Information */ struct libfc_function_template tt; - u16 link_status; + u8 link_up; + u8 qfull; enum fc_lport_state state; unsigned long boot_time; @@ -637,7 +665,7 @@ struct fc_lport { struct delayed_work disc_work; }; -/** +/* * FC_LPORT HELPER FUNCTIONS *****************************/ static inline void *lport_priv(const struct fc_lport *lp) @@ -669,7 +697,7 @@ static inline void fc_lport_state_enter(struct fc_lport *lp, } -/** +/* * LOCAL PORT LAYER *****************************/ int fc_lport_init(struct fc_lport *lp); @@ -704,12 +732,6 @@ void fc_linkup(struct fc_lport *); void fc_linkdown(struct fc_lport *); /* - * Pause and unpause traffic. - */ -void fc_pause(struct fc_lport *); -void fc_unpause(struct fc_lport *); - -/* * Configure the local port. */ int fc_lport_config(struct fc_lport *); @@ -725,19 +747,19 @@ int fc_lport_reset(struct fc_lport *); int fc_set_mfs(struct fc_lport *lp, u32 mfs); -/** +/* * REMOTE PORT LAYER *****************************/ int fc_rport_init(struct fc_lport *lp); void fc_rport_terminate_io(struct fc_rport *rp); -/** +/* * DISCOVERY LAYER *****************************/ int fc_disc_init(struct fc_lport *lp); -/** +/* * SCSI LAYER *****************************/ /* @@ -798,7 +820,7 @@ int fc_change_queue_type(struct scsi_device *sdev, int tag_type); */ void fc_fcp_destroy(struct fc_lport *); -/** +/* * ELS/CT interface *****************************/ /* @@ -807,7 +829,7 @@ void fc_fcp_destroy(struct fc_lport *); int fc_elsct_init(struct fc_lport *lp); -/** +/* * EXCHANGE MANAGER LAYER *****************************/ /* @@ -916,7 +938,7 @@ struct fc_seq *fc_seq_start_next(struct fc_seq *sp); * If s_id is non-zero, reset only exchanges originating from that FID. * If d_id is non-zero, reset only exchanges sending to that FID. 
*/ -void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id); +void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id); /* * Functions for fc_functions_template */ diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 89fdbb9a6a1..941818f29f5 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -46,6 +46,7 @@ struct fcoe_softc { struct net_device *phys_dev; /* device with ethtool_ops */ struct packet_type fcoe_packet_type; struct sk_buff_head fcoe_pending_queue; + u8 fcoe_pending_queue_active; u8 dest_addr[ETH_ALEN]; u8 ctl_src_addr[ETH_ALEN]; @@ -58,16 +59,10 @@ struct fcoe_softc { u8 address_mode; }; -static inline struct fcoe_softc *fcoe_softc( - const struct fc_lport *lp) -{ - return (struct fcoe_softc *)lport_priv(lp); -} - static inline struct net_device *fcoe_netdev( const struct fc_lport *lp) { - return fcoe_softc(lp)->real_dev; + return ((struct fcoe_softc *)lport_priv(lp))->real_dev; } static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
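
The interface changes above lend themselves to a few usage sketches. In everything that follows, names prefixed foo_ and the surrounding glue are illustrative assumptions, not part of the patch. First, bio_integrity_clone() now takes a gfp_t, so a clone site in the I/O path can pass GFP_NOIO rather than being pinned to one allocation mode. A minimal sketch, assuming the caller owns a bio_set bs:

#include <linux/bio.h>

static int foo_clone_bio(struct bio *bio, struct bio_set *bs,
                         struct bio **out)
{
        struct bio *clone;

        clone = bio_clone(bio, GFP_NOIO);
        if (!clone)
                return -ENOMEM;

        /* the integrity payload is cloned separately, with the same gfp */
        if (bio_integrity(bio) &&
            bio_integrity_clone(clone, bio, GFP_NOIO, bs) < 0) {
                bio_put(clone);
                return -ENOMEM;
        }

        *out = clone;
        return 0;
}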
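
mm_cpumask() in mm_types.h, like tsk_cpumask() in sched.h, exists so that callers stop dereferencing cpu_vm_mask directly and the mask representation can change later. A sketch of the intended call style; foo_flush_others() and the IPI it would send are hypothetical:

#include <linux/mm_types.h>
#include <linux/cpumask.h>

static void foo_flush_others(struct mm_struct *mm)
{
        int cpu;

        /* iterate via the accessor, not mm->cpu_vm_mask itself */
        for_each_cpu(cpu, mm_cpumask(mm)) {
                /* e.g. queue a TLB-flush IPI for this cpu */
        }
}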
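
The percpu.h rework retires percpu_alloc()/percpu_free() and percpu_ptr() in favor of __alloc_percpu(size, align), free_percpu(), and a per_cpu_ptr() that is now the accessor on both the dynamic-area and pointer-array paths. A minimal sketch of the resulting calling convention; the stats structure is illustrative:

#include <linux/percpu.h>

struct foo_stats {
        unsigned long events;
};

static struct foo_stats *foo_stats;

static int foo_stats_init(void)
{
        int cpu;

        /* size and alignment come from the type; memory is zeroed */
        foo_stats = alloc_percpu(struct foo_stats);
        if (!foo_stats)
                return -ENOMEM;

        /* per_cpu_ptr() resolves one CPU's instance */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(foo_stats, cpu)->events = 0;
        return 0;
}

static void foo_stats_exit(void)
{
        free_percpu(foo_stats);
}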
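
All three RCU flavors above gain rcu_blocking_is_gp(), which says when the caller's own willingness to block already amounts to a grace period. Its intended consumer is a fastpath in synchronize_rcu(); the sketch below mirrors the shape of such a caller but rebuilds the completion helpers locally rather than quoting kernel/rcupdate.c:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

/* pair a callback with a completion (mirrors what rcupdate.c does) */
struct foo_rcu_sync {
        struct rcu_head head;
        struct completion completion;
};

static void foo_wakeme_after_rcu(struct rcu_head *head)
{
        struct foo_rcu_sync *rcu;

        rcu = container_of(head, struct foo_rcu_sync, head);
        complete(&rcu->completion);
}

void foo_synchronize_rcu(void)
{
        struct foo_rcu_sync rcu;

        /*
         * A lone online CPU (and, for preemptible RCU, a scheduler
         * that has not started) means no reader can be inside a
         * critical section, so the call itself is the grace period.
         */
        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        call_rcu(&rcu.head, foo_wakeme_after_rcu);
        wait_for_completion(&rcu.completion);
}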
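
The block comment added above register_pernet_subsys() is the operative guidance here: protocol teardown belongs in a pernet subsys, because only network devices and sockets are guaranteed to have left the namespace by the time exit methods run. A sketch of the recommended registration; the foo_* hooks are placeholders:

#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
        /* allocate per-namespace protocol state here */
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        /*
         * By the time this runs, every netdev and socket has left
         * the namespace, so no packets can race with the teardown.
         */
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
};

static int __init foo_init(void)
{
        /* subsys, not device: see the comment in net_namespace.h */
        return register_pernet_subsys(&foo_net_ops);
}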
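
Finally, the libfc_function_template annotations above make frame_send the one REQUIRED handler, with NULL OPTIONAL pointers picking up libfc defaults from the init routines. A hypothetical LLD wiring sketch under that reading; foo_frame_send() stands in for a real hardware path:

#include <scsi/libfc.h>

/* hypothetical hardware send hook; the only REQUIRED template entry */
static int foo_frame_send(struct fc_lport *lp, struct fc_frame *fp)
{
        /* encapsulate fp and hand it to the NIC; stubbed out here */
        fc_frame_free(fp);
        return 0;
}

static int foo_lport_setup(struct fc_lport *lp)
{
        lp->tt.frame_send = foo_frame_send;     /* REQUIRED */
        /*
         * Leave the OPTIONAL tt handlers NULL; the init calls below
         * install the libfc defaults for anything left unset.
         */
        fc_exch_init(lp);
        fc_lport_init(lp);
        fc_rport_init(lp);
        fc_disc_init(lp);
        return 0;
}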