Diffstat (limited to 'include')
 include/asm-i386/acpi.h           | 10
 include/asm-i386/pgtable.h        |  5
 include/asm-ia64/page.h           |  2
 include/asm-ia64/pgtable.h        |  5
 include/asm-powerpc/pgtable.h     |  5
 include/asm-s390/pgalloc.h        |  7
 include/asm-sh64/pgalloc.h        | 16
 include/asm-x86_64/pgtable.h      |  4
 include/linux/device.h            |  1
 include/linux/hugetlb.h           | 45
 include/linux/libata.h            |  1
 include/linux/migrate.h           | 36
 include/linux/mm.h                | 48
 include/linux/mm_inline.h         |  2
 include/linux/page-flags.h        | 24
 include/linux/pci_ids.h           |  2
 include/linux/rtc.h               |  4
 include/linux/slab.h              |  3
 include/linux/smp.h               | 23
 include/linux/swap.h              | 38
 include/linux/workqueue.h         |  6
 include/scsi/scsi.h               |  2
 include/scsi/scsi_cmnd.h          | 20
 include/scsi/scsi_device.h        | 26
 include/scsi/scsi_host.h          | 14
 include/scsi/scsi_transport.h     | 11
 include/scsi/scsi_transport_sas.h | 50
 include/scsi/scsi_transport_spi.h |  4
 28 files changed, 253 insertions(+), 161 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 55059abf9c9..20f52395421 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,6 +103,12 @@ __acpi_release_global_lock (unsigned int *lock)
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
+#ifdef CONFIG_X86_IO_APIC
+extern void check_acpi_pci(void);
+#else
+static inline void check_acpi_pci(void) { }
+#endif
+
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
@@ -128,8 +134,6 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
extern int skip_ioapic_setup;
extern int acpi_skip_timer_override;
-extern void check_acpi_pci(void);
-
static inline void disable_ioapic_setup(void)
{
skip_ioapic_setup = 1;
@@ -142,8 +146,6 @@ static inline int ioapic_setup_disabled(void)
#else
static inline void disable_ioapic_setup(void) { }
-static inline void check_acpi_pci(void) { }
-
#endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 088a945bf26..ee056c41a9f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -219,13 +219,12 @@ extern unsigned long pg0[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
/*
* The following only works if pte_present() is not true.
@@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
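Note on the _PAGE_PSE change above: pte_mkhuge() no longer folds in _PAGE_PRESENT, so a caller is expected to get the present bit from the protection bits first and only then mark the entry as a large page. A minimal sketch of that contract (the helper name is illustrative, not part of the patch):

static pte_t make_huge_pte_sketch(struct page *page, pgprot_t prot)
{
	pte_t entry = mk_pte(page, prot);	/* prot supplies _PAGE_PRESENT etc. */
	return pte_mkhuge(entry);		/* adds only _PAGE_PSE */
}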
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 5e6362a786b..3ab27333dae 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,8 @@
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e2560c58384..c0f8144f234 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P))
+#define pte_mkhuge(pte) (__pte(pte_val(pte)))
/*
* Macro to a page protection value as "uncacheable". Note that "protection" is really a
@@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr;
#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
- unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
/*
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index e38931379a7..185ee15963a 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -468,11 +468,6 @@ extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
-#ifdef CONFIG_HUGETLB_PAGE
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- free_pgd_range(tlb, addr, end, floor, ceiling)
-#endif
-
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 3417dd71ab4..e28aaf28e4a 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -158,11 +158,4 @@ static inline void pte_free(struct page *pte)
#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
-/*
- * This establishes kernel virtual mappings (e.g., as a result of a
- * vmalloc call). Since s390-esame uses a separate kernel page table,
- * there is nothing to do here... :)
- */
-#define set_pgdir(addr,entry) do { } while(0)
-
#endif /* _S390_PGALLOC_H */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index 678251ac1db..b29dd468817 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -167,22 +167,6 @@ static __inline__ void pmd_free(pmd_t *pmd)
extern int do_check_pgt_cache(int, int);
-static inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-}
-
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 715fd94cf57..a617d364d08 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
@@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
struct vm_area_struct;
diff --git a/include/linux/device.h b/include/linux/device.h
index 5b595fdfb67..6d2345d8608 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -378,6 +378,7 @@ extern void device_bind_driver(struct device * dev);
extern void device_release_driver(struct device * dev);
extern int device_attach(struct device * dev);
extern void driver_attach(struct device_driver * drv);
+extern void device_reprobe(struct device *dev);
/*
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 68d82ad6b17..d6f1019625a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,10 +20,7 @@ void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long)
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
-int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
-void free_huge_page(struct page *);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -39,18 +36,35 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
+void hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot);
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- do { } while (0)
+#endif
+
+#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define hugetlb_free_pgd_range free_pgd_range
+#else
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
#endif
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len) \
- is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif
@@ -87,20 +101,17 @@ static inline unsigned long hugetlb_total_pages(void)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end) BUG()
-#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define is_aligned_hugepage_range(addr, len) 0
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- do { } while (0)
-#define alloc_huge_page(vma, addr) ({ NULL; })
-#define free_huge_page(p) ({ (void)(p); BUG(); })
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
+#define hugetlb_change_protection(vma, address, end, newprot)
+
#ifndef HPAGE_MASK
#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
#define HPAGE_SIZE PAGE_SIZE
@@ -128,6 +139,8 @@ struct hugetlbfs_sb_info {
struct hugetlbfs_inode_info {
struct shared_policy policy;
+ /* Protected by the (global) hugetlb_lock */
+ unsigned long prereserved_hpages;
struct inode vfs_inode;
};
@@ -144,6 +157,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
+int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
+ unsigned long atleast_hpages);
+void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
+ unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);
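The default prepare_hugepage_range() above only enforces huge-page alignment. A hedged worked example, assuming 4 MB huge pages so HPAGE_MASK = ~(4 MB - 1):

	prepare_hugepage_range(0x40000000, 0x00800000)	/* both 4 MB aligned -> 0 */
	prepare_hugepage_range(0x40001000, 0x00800000)	/* addr misaligned -> -EINVAL */

The new reservation hooks are expected to bracket a mapping's lifetime; an illustrative, non-authoritative caller:

static int reserve_sketch(struct hugetlbfs_inode_info *info, unsigned long hpages)
{
	int ret = hugetlb_extend_reservation(info, hpages);
	if (ret)
		return ret;			/* not enough huge pages available */
	/* ... later, on teardown ... */
	hugetlb_truncate_reservation(info, 0);
	return 0;
}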
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 239408ecfdd..204c37a55f0 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -508,7 +508,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern int ata_scsi_error(struct Scsi_Host *host);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
new file mode 100644
index 00000000000..7d09962c3c0
--- /dev/null
+++ b/include/linux/migrate.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_MIGRATE_H
+#define _LINUX_MIGRATE_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#ifdef CONFIG_MIGRATION
+extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
+extern int putback_lru_pages(struct list_head *l);
+extern int migrate_page(struct page *, struct page *);
+extern void migrate_page_copy(struct page *, struct page *);
+extern int migrate_page_remove_references(struct page *, struct page *, int);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+ struct list_head *moved, struct list_head *failed);
+int migrate_pages_to(struct list_head *pagelist,
+ struct vm_area_struct *vma, int dest);
+extern int fail_migrate_page(struct page *, struct page *);
+
+extern int migrate_prep(void);
+
+#else
+
+static inline int isolate_lru_page(struct page *p, struct list_head *list)
+ { return -ENOSYS; }
+static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline int migrate_pages(struct list_head *l, struct list_head *t,
+ struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+
+static inline int migrate_prep(void) { return -ENOSYS; }
+
+/* Possible settings for the migrate_page() method in address_operations */
+#define migrate_page NULL
+#define fail_migrate_page NULL
+
+#endif /* CONFIG_MIGRATION */
+#endif /* _LINUX_MIGRATE_H */
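The new header centralises the page migration entry points moved out of swap.h. A rough usage sketch built only from the declared signatures (list names are invented, error handling omitted):

LIST_HEAD(pagelist);
LIST_HEAD(newlist);
LIST_HEAD(moved);
LIST_HEAD(failed);

migrate_prep();				/* prepare before isolating pages */
isolate_lru_page(page, &pagelist);	/* take the page off the LRU */
migrate_pages(&pagelist, &newlist, &moved, &failed);
putback_lru_pages(&failed);		/* re-add anything that could not move */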
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb..6aa016f1d3a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,43 +286,34 @@ struct page {
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
*/
/*
* Drop a ref, return true if the logical refcount fell to zero (the page has
* no users)
*/
-#define put_page_testzero(p) \
- ({ \
- BUG_ON(atomic_read(&(p)->_count) == -1);\
- atomic_add_negative(-1, &(p)->_count); \
- })
+static inline int put_page_testzero(struct page *page)
+{
+ BUG_ON(atomic_read(&page->_count) == 0);
+ return atomic_dec_and_test(&page->_count);
+}
/*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
*/
-#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1)
-#define __put_page(p) atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+ return atomic_inc_not_zero(&page->_count);
+}
extern void FASTCALL(__page_cache_release(struct page *));
static inline int page_count(struct page *page)
{
- if (PageCompound(page))
+ if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
- return atomic_read(&page->_count) + 1;
+ return atomic_read(&page->_count);
}
static inline void get_page(struct page *page)
@@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
atomic_inc(&page->_count);
}
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+ atomic_set(&page->_count, 1);
+}
+
void put_page(struct page *page);
+void split_page(struct page *page, unsigned int order);
+
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
@@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);
int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
void drop_pagecache(void);
void drop_slab(void);
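With the refcount now stored directly (a free page has _count == 0), the helpers above pair up as follows; a minimal sketch using only functions declared in this hunk:

if (get_page_unless_zero(page)) {	/* speculative ref; fails on a free page */
	/* ... inspect or use the page ... */
	put_page(page);			/* drops the ref, releases on the last put */
}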
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8ac854f7f19..3b6723dfaff 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -32,7 +32,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
{
list_del(&page->lru);
if (PageActive(page)) {
- ClearPageActive(page);
+ __ClearPageActive(page);
zone->nr_active--;
} else {
zone->nr_inactive--;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d52999c4333..9ea629c02a4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,8 +86,9 @@
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
- * modified from process context, or only modified from interrupt context.
- * In this case, the field should be commented here.
+ * modified from process context and protected from preemption, or only
+ * modified from interrupt context. In this case, the field should be
+ * commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
@@ -239,22 +240,19 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
+#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
+#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
-#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
-#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags)
-#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags)
-#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)
+#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
+#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) is_highmem(page_zone(page))
@@ -329,8 +327,8 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
-#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
-#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
+#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
+#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
#ifdef CONFIG_SWAP
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b9810ddf435..ec3c3293262 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -852,6 +852,8 @@
#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432
#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512
#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522
+#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422
+#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432
#define PCI_VENDOR_ID_CYRIX 0x1078
#define PCI_DEVICE_ID_CYRIX_5510 0x0000
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 0b2ba67ff13..b739ac1f7ca 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -11,8 +11,6 @@
#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_
-#include <linux/interrupt.h>
-
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@@ -95,6 +93,8 @@ struct rtc_pll_info {
#ifdef __KERNEL__
+#include <linux/interrupt.h>
+
typedef struct rtc_task {
void (*func)(void *private_data);
void *private_data;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8cf52939d0a..2b28c849d75 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* Poison objects */
-#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
@@ -118,7 +117,7 @@ extern void *kzalloc(size_t, gfp_t);
*/
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
- if (n != 0 && size > INT_MAX / n)
+ if (n != 0 && size > ULONG_MAX / n)
return NULL;
return kzalloc(n * size, flags);
}
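The widened kcalloc() overflow check bounds the product against ULONG_MAX, the limit of the size_t multiplication, rather than INT_MAX. Worked example on a 32-bit box (ULONG_MAX = 0xffffffff):

	kcalloc(5, 0x40000000, GFP_KERNEL)
		/* 0x40000000 > ULONG_MAX / 5 = 0x33333333 -> returns NULL */
	kcalloc(5, 0x10000000, GFP_KERNEL)
		/* 0x10000000 <= 0x33333333 -> forwards 0x50000000 to kzalloc() */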
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 44153fdf73f..d699a16b0cb 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-extern int smp_call_function (void (*func) (void *info), void *info,
- int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
/*
* Call a function on all processors
*/
-static inline int on_each_cpu(void (*func) (void *info), void *info,
- int retry, int wait)
-{
- int ret = 0;
-
- preempt_disable();
- ret = smp_call_function(func, info, retry, wait);
- func(info);
- preempt_enable();
- return ret;
-}
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
#define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
-#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
+#define on_each_cpu(func,info,retry,wait) \
+ ({ \
+ local_irq_disable(); \
+ func(info); \
+ local_irq_enable(); \
+ 0; \
+ })
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
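on_each_cpu() is now out of line on SMP, and the UP fallback above still runs the function locally with interrupts disabled. A hedged usage sketch (the function and its argument are invented for illustration):

static void flush_local_stats(void *info)
{
	/* runs on every CPU, subject to the usual IPI-context restrictions */
}

static void flush_all_stats(void)
{
	on_each_cpu(flush_local_stats, NULL, 0, 1);	/* retry = 0, wait = 1 */
}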
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d572b19afb7..12415dd9445 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,9 +172,24 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, gfp_t);
-extern int shrink_all_memory(int);
+extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
+extern int remove_mapping(struct address_space *mapping, struct page *page);
+
+/* possible outcome of pageout() */
+typedef enum {
+ /* failed to write page out, page is locked */
+ PAGE_KEEP,
+ /* move page to the active list, page is locked */
+ PAGE_ACTIVATE,
+ /* page has been sent to the disk successfully, page is unlocked */
+ PAGE_SUCCESS,
+ /* page is clean and locked */
+ PAGE_CLEAN,
+} pageout_t;
+
+extern pageout_t pageout(struct page *page, struct address_space *mapping);
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
@@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
-#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p);
-extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed);
-extern int fail_migrate_page(struct page *, struct page *);
-#else
-static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-#define fail_migrate_page NULL
-#endif
-
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
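The pageout_t values above are what a reclaim loop is expected to dispatch on; an illustrative, non-authoritative caller:

switch (pageout(page, mapping)) {
case PAGE_KEEP:		/* write failed; page stays on the list, still locked */
	break;
case PAGE_ACTIVATE:	/* not reclaimable now; move back to the active list */
	break;
case PAGE_SUCCESS:	/* write-out started; page has been unlocked */
	break;
case PAGE_CLEAN:	/* nothing to write; page is clean and locked */
	break;
}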
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 86b11130023..957c21c16d6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -20,6 +20,10 @@ struct work_struct {
struct timer_list timer;
};
+struct execute_work {
+ struct work_struct work;
+};
+
#define __WORK_INITIALIZER(n, f, d) { \
.entry = { &(n).entry, &(n).entry }, \
.func = (f), \
@@ -74,6 +78,8 @@ extern void init_workqueues(void);
void cancel_rearming_delayed_work(struct work_struct *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
struct work_struct *);
+int execute_in_process_context(void (*fn)(void *), void *,
+ struct execute_work *);
/*
* Kill off a pending schedule_delayed_work(). Note that the work callback
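execute_in_process_context() runs the callback immediately when the caller is already in process context and otherwise defers it via the embedded execute_work. A hedged sketch with invented names:

struct my_object {
	struct execute_work ew;
	/* ... */
};

static void my_object_release(void *data)
{
	kfree(data);		/* work that needs process context */
}

static void my_object_put(struct my_object *obj)
{
	execute_in_process_context(my_object_release, obj, &obj->ew);
}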
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 9c331258bc2..c60b8ff2f5e 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -433,6 +433,4 @@ struct scsi_lun {
/* Used to obtain the PCI location of a device */
#define SCSI_IOCTL_GET_PCI 0x5387
-int scsi_execute_in_process_context(void (*fn)(void *data), void *data);
-
#endif /* _SCSI_SCSI_H */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7529f4388bb..1ace1b9fe53 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -104,10 +104,10 @@ struct scsi_cmnd {
working on */
#define SCSI_SENSE_BUFFERSIZE 96
- unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; /* obtained by REQUEST SENSE
- * when CHECK CONDITION is
- * received on original command
- * (auto-sense) */
+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ /* obtained by REQUEST SENSE when
+ * CHECK CONDITION is received on original
+ * command (auto-sense) */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
@@ -120,12 +120,12 @@ struct scsi_cmnd {
struct scsi_pointer SCp; /* Scratchpad used by some host adapters */
unsigned char *host_scribble; /* The host adapter is allowed to
- * call scsi_malloc and get some memory
- * and hang it here. The host adapter
- * is also expected to call scsi_free
- * to release this memory. (The memory
- * obtained by scsi_malloc is guaranteed
- * to be at an address < 16Mb). */
+ * call scsi_malloc and get some memory
+ * and hang it here. The host adapter
+ * is also expected to call scsi_free
+ * to release this memory. (The memory
+ * obtained by scsi_malloc is guaranteed
+ * to be at an address < 16Mb). */
int result; /* Status code from lower level driver */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 290e3b4d2ae..895d212864c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <asm/atomic.h>
struct request_queue;
@@ -73,7 +74,6 @@ struct scsi_device {
unsigned sector_size; /* size in bytes */
void *hostdata; /* available to low-level driver */
- char devfs_name[256]; /* devfs junk */
char type;
char scsi_level;
char inq_periph_qual; /* PQ from INQUIRY data */
@@ -138,6 +138,8 @@ struct scsi_device {
struct device sdev_gendev;
struct class_device sdev_classdev;
+ struct execute_work ew; /* used to get process context on put */
+
enum scsi_device_state sdev_state;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
@@ -154,6 +156,11 @@ struct scsi_device {
#define scmd_printk(prefix, scmd, fmt, a...) \
dev_printk(prefix, &(scmd)->device->sdev_gendev, fmt, ##a)
+enum scsi_target_state {
+ STARGET_RUNNING = 1,
+ STARGET_DEL,
+};
+
/*
* scsi_target: representation of a scsi target, for now, this is only
* used for single_lun devices. If no one has active IO to the target,
@@ -168,8 +175,13 @@ struct scsi_target {
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
- unsigned long create:1; /* signal that it needs to be added */
+ unsigned int create:1; /* signal that it needs to be added */
+ unsigned int pdt_1f_for_no_lun; /* PDT = 0x1f */
+ /* means no lun present */
+
char scsi_level;
+ struct execute_work ew;
+ enum scsi_target_state state;
void *hostdata; /* available to low-level driver */
unsigned long starget_data[0]; /* for the transport */
/* starget_data must be the last element!!!! */
@@ -249,6 +261,11 @@ extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout,
int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
+extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
+ int modepage, unsigned char *buffer, int len,
+ int timeout, int retries,
+ struct scsi_mode_data *data,
+ struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries);
extern int scsi_device_set_state(struct scsi_device *sdev,
@@ -281,6 +298,11 @@ extern int scsi_execute_async(struct scsi_device *sdev,
void (*done)(void *, char *, int, int),
gfp_t gfp);
+static inline void scsi_device_reprobe(struct scsi_device *sdev)
+{
+ device_reprobe(&sdev->sdev_gendev);
+}
+
static inline unsigned int sdev_channel(struct scsi_device *sdev)
{
return sdev->channel;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 827992949c4..a6cf3e535c0 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -147,20 +147,6 @@ struct scsi_host_template {
int (* eh_host_reset_handler)(struct scsi_cmnd *);
/*
- * This is an optional routine to notify the host that the scsi
- * timer just fired. The returns tell the timer routine what to
- * do about this:
- *
- * EH_HANDLED: I fixed the error, please complete the command
- * EH_RESET_TIMER: I need more time, reset the timer and
- * begin counting again
- * EH_NOT_HANDLED Begin normal error recovery
- *
- * Status: OPTIONAL
- */
- enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
-
- /*
* Before the mid layer attempts to scan for a new device where none
* currently exists, it will call this entry in your driver. Should
* your driver need to allocate any structs or perform any other init
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index e7b1054adf8..b3657f11193 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -48,6 +48,17 @@ struct scsi_transport_template {
* True if the transport wants to use a host-based work-queue
*/
unsigned int create_work_queue : 1;
+
+ /*
+ * This is an optional routine that allows the transport to become
+ * involved when a scsi io timer fires. The return value tells the
+ * timer routine how to finish the io timeout handling:
+ * EH_HANDLED: I fixed the error, please complete the command
+ * EH_RESET_TIMER: I need more time, reset the timer and
+ * begin counting again
+ * EH_NOT_HANDLED Begin normal error recovery
+ */
+ enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
};
#define transport_class_to_shost(tc) \
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index b91400bfb02..93cfb4bf421 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -30,6 +30,7 @@ enum sas_linkrate {
SAS_SATA_PORT_SELECTOR,
SAS_LINK_RATE_1_5_GBPS,
SAS_LINK_RATE_3_0_GBPS,
+ SAS_LINK_RATE_6_0_GBPS,
SAS_LINK_VIRTUAL,
};
@@ -89,11 +90,45 @@ struct sas_rphy {
dev_to_rphy((cdev)->dev)
#define rphy_to_shost(rphy) \
dev_to_shost((rphy)->dev.parent)
+#define target_to_rphy(targ) \
+ dev_to_rphy((targ)->dev.parent)
+
+struct sas_end_device {
+ struct sas_rphy rphy;
+ /* flags */
+ unsigned ready_led_meaning:1;
+ /* parameters */
+ u16 I_T_nexus_loss_timeout;
+ u16 initiator_response_timeout;
+};
+#define rphy_to_end_device(r) \
+ container_of((r), struct sas_end_device, rphy)
+
+struct sas_expander_device {
+ int level;
+
+ #define SAS_EXPANDER_VENDOR_ID_LEN 8
+ char vendor_id[SAS_EXPANDER_VENDOR_ID_LEN+1];
+ #define SAS_EXPANDER_PRODUCT_ID_LEN 16
+ char product_id[SAS_EXPANDER_PRODUCT_ID_LEN+1];
+ #define SAS_EXPANDER_PRODUCT_REV_LEN 4
+ char product_rev[SAS_EXPANDER_PRODUCT_REV_LEN+1];
+ #define SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN 8
+ char component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN+1];
+ u16 component_id;
+ u8 component_revision_id;
+ struct sas_rphy rphy;
+
+};
+#define rphy_to_expander_device(r) \
+ container_of((r), struct sas_expander_device, rphy)
/* The functions by which the transport class and the driver communicate */
struct sas_function_template {
int (*get_linkerrors)(struct sas_phy *);
+ int (*get_enclosure_identifier)(struct sas_rphy *, u64 *);
+ int (*get_bay_identifier)(struct sas_rphy *);
int (*phy_reset)(struct sas_phy *, int);
};
@@ -106,7 +141,8 @@ extern int sas_phy_add(struct sas_phy *);
extern void sas_phy_delete(struct sas_phy *);
extern int scsi_is_sas_phy(const struct device *);
-extern struct sas_rphy *sas_rphy_alloc(struct sas_phy *);
+extern struct sas_rphy *sas_end_device_alloc(struct sas_phy *);
+extern struct sas_rphy *sas_expander_alloc(struct sas_phy *, enum sas_device_type);
void sas_rphy_free(struct sas_rphy *);
extern int sas_rphy_add(struct sas_rphy *);
extern void sas_rphy_delete(struct sas_rphy *);
@@ -115,5 +151,17 @@ extern int scsi_is_sas_rphy(const struct device *);
extern struct scsi_transport_template *
sas_attach_transport(struct sas_function_template *);
extern void sas_release_transport(struct scsi_transport_template *);
+int sas_read_port_mode_page(struct scsi_device *);
+
+static inline int
+scsi_is_sas_expander_device(struct device *dev)
+{
+ struct sas_rphy *rphy;
+ if (!scsi_is_sas_rphy(dev))
+ return 0;
+ rphy = dev_to_rphy(dev);
+ return rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE;
+}
#endif /* SCSI_TRANSPORT_SAS_H */
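The expander/end-device split above can be used roughly like this; a sketch built only from helpers declared in this header (the function name is illustrative):

static const char *expander_vendor_sketch(struct device *dev)
{
	struct sas_expander_device *edev;

	if (!scsi_is_sas_expander_device(dev))
		return NULL;
	edev = rphy_to_expander_device(dev_to_rphy(dev));
	return edev->vendor_id;
}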
diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h
index fb5a2ffae93..5e1d61913d4 100644
--- a/include/scsi/scsi_transport_spi.h
+++ b/include/scsi/scsi_transport_spi.h
@@ -148,5 +148,9 @@ void spi_schedule_dv_device(struct scsi_device *);
void spi_dv_device(struct scsi_device *);
void spi_display_xfer_agreement(struct scsi_target *);
int spi_print_msg(const unsigned char *);
+int spi_populate_width_msg(unsigned char *msg, int width);
+int spi_populate_sync_msg(unsigned char *msg, int period, int offset);
+int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width,
+ int options);
#endif /* SCSI_TRANSPORT_SPI_H */
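The new spi_populate_*_msg() helpers build the SPI negotiation message bytes into a caller-supplied buffer and return an int, presumably the number of bytes written. A hedged sketch (buffer size and the period/offset/width/options values are purely illustrative):

unsigned char msg[16];
int period = 12, offset = 15, options = 0;
int len;

len = spi_populate_sync_msg(msg, period, offset);		/* SDTR */
len = spi_populate_width_msg(msg, 1);				/* WDTR */
len = spi_populate_ppr_msg(msg, period, offset, 1, options);	/* PPR */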