Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/acpi.h          |  10
-rw-r--r--  include/asm-i386/pgtable.h       |   5
-rw-r--r--  include/asm-ia64/intel_intrin.h  | 134
-rw-r--r--  include/asm-ia64/machvec.h       |  13
-rw-r--r--  include/asm-ia64/machvec_sn2.h   |   4
-rw-r--r--  include/asm-ia64/mca.h           |   2
-rw-r--r--  include/asm-ia64/mutex.h         |  93
-rw-r--r--  include/asm-ia64/page.h          |   2
-rw-r--r--  include/asm-ia64/pgtable.h       |   5
-rw-r--r--  include/asm-ia64/processor.h     |   3
-rw-r--r--  include/asm-ia64/signal.h        |   2
-rw-r--r--  include/asm-ia64/sn/addrs.h      |   8
-rw-r--r--  include/asm-ia64/sn/rw_mmr.h     |  56
-rw-r--r--  include/asm-ia64/sn/tioce.h      |  36
-rw-r--r--  include/asm-ia64/sn/xpc.h        |  22
-rw-r--r--  include/asm-ia64/system.h        |   7
-rw-r--r--  include/asm-ia64/thread_info.h   |  12
-rw-r--r--  include/asm-powerpc/pgtable.h    |   5
-rw-r--r--  include/asm-s390/pgalloc.h       |   7
-rw-r--r--  include/asm-sh64/pgalloc.h       |  16
-rw-r--r--  include/asm-x86_64/pgtable.h     |   4
-rw-r--r--  include/linux/hugetlb.h          |  45
-rw-r--r--  include/linux/migrate.h          |  36
-rw-r--r--  include/linux/mm.h               |  48
-rw-r--r--  include/linux/mm_inline.h        |   2
-rw-r--r--  include/linux/page-flags.h       |  24
-rw-r--r--  include/linux/rtc.h              |   4
-rw-r--r--  include/linux/slab.h             |   3
-rw-r--r--  include/linux/smp.h              |  23
-rw-r--r--  include/linux/swap.h             |  38
30 files changed, 327 insertions(+), 342 deletions(-)
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 55059abf9c9..20f52395421 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -103,6 +103,12 @@ __acpi_release_global_lock (unsigned int *lock)
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
+#ifdef CONFIG_X86_IO_APIC
+extern void check_acpi_pci(void);
+#else
+static inline void check_acpi_pci(void) { }
+#endif
+
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
@@ -128,8 +134,6 @@ extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
extern int skip_ioapic_setup;
extern int acpi_skip_timer_override;
-extern void check_acpi_pci(void);
-
static inline void disable_ioapic_setup(void)
{
skip_ioapic_setup = 1;
@@ -142,8 +146,6 @@ static inline int ioapic_setup_disabled(void)
#else
static inline void disable_ioapic_setup(void) { }
-static inline void check_acpi_pci(void) { }
-
#endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
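
The effect of moving check_acpi_pci() under CONFIG_X86_IO_APIC is that callers need no #ifdef of their own: the call is either the real function or an empty inline, depending on the config. A sketch, with a hypothetical caller name:

    /* sketch only; the caller name is made up, not from the patch */
    void __init setup_arch_example(void)
    {
    	check_acpi_pci();	/* real function or empty stub, per config */
    }
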
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 088a945bf26..ee056c41a9f 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -219,13 +219,12 @@ extern unsigned long pg0[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#define __LARGE_PTE (_PAGE_PSE | _PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_huge(pte_t pte) { return ((pte).pte_low & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
/*
* The following only works if pte_present() is not true.
@@ -242,7 +241,7 @@ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= __LARGE_PTE; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
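
The hunk above makes pte_huge() test _PAGE_PSE alone instead of requiring _PAGE_PRESENT as well. A minimal user-space sketch of the difference; the bit values are the i386 ones, assumed here rather than included:

    #include <stdio.h>

    #define _PAGE_PRESENT	0x001
    #define _PAGE_PSE	0x080
    #define __LARGE_PTE	(_PAGE_PSE | _PAGE_PRESENT)

    int main(void)
    {
    	unsigned long huge_not_present = _PAGE_PSE;	/* PSE set, present clear */

    	/* old test: needed both bits, so this huge PTE was missed */
    	printf("old pte_huge: %d\n",
    	       (huge_not_present & __LARGE_PTE) == __LARGE_PTE);
    	/* new test: the PSE bit alone identifies a huge PTE */
    	printf("new pte_huge: %d\n", (huge_not_present & _PAGE_PSE) != 0);
    	return 0;
    }
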
diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h
index a7122d85017..d069b6acddc 100644
--- a/include/asm-ia64/intel_intrin.h
+++ b/include/asm-ia64/intel_intrin.h
@@ -5,113 +5,10 @@
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
+ * Copyright (C) 2005,2006 Hongjiu Lu <hongjiu.lu@intel.com>
*
*/
-#include <asm/types.h>
-
-void __lfetch(int lfhint, void *y);
-void __lfetch_excl(int lfhint, void *y);
-void __lfetch_fault(int lfhint, void *y);
-void __lfetch_fault_excl(int lfhint, void *y);
-
-/* In the following, whichFloatReg should be an integer from 0-127 */
-void __ldfs(const int whichFloatReg, void *src);
-void __ldfd(const int whichFloatReg, void *src);
-void __ldfe(const int whichFloatReg, void *src);
-void __ldf8(const int whichFloatReg, void *src);
-void __ldf_fill(const int whichFloatReg, void *src);
-void __stfs(void *dst, const int whichFloatReg);
-void __stfd(void *dst, const int whichFloatReg);
-void __stfe(void *dst, const int whichFloatReg);
-void __stf8(void *dst, const int whichFloatReg);
-void __stf_spill(void *dst, const int whichFloatReg);
-
-void __st1_rel(void *dst, const __s8 value);
-void __st2_rel(void *dst, const __s16 value);
-void __st4_rel(void *dst, const __s32 value);
-void __st8_rel(void *dst, const __s64 value);
-__u8 __ld1_acq(void *src);
-__u16 __ld2_acq(void *src);
-__u32 __ld4_acq(void *src);
-__u64 __ld8_acq(void *src);
-
-__u64 __fetchadd4_acq(__u32 *addend, const int increment);
-__u64 __fetchadd4_rel(__u32 *addend, const int increment);
-__u64 __fetchadd8_acq(__u64 *addend, const int increment);
-__u64 __fetchadd8_rel(__u64 *addend, const int increment);
-
-__u64 __getf_exp(double d);
-
-/* OS Related Itanium(R) Intrinsics */
-
-/* The names to use for whichReg and whichIndReg below come from
- the include file asm/ia64regs.h */
-
-__u64 __getIndReg(const int whichIndReg, __s64 index);
-__u64 __getReg(const int whichReg);
-
-void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
-void __setReg(const int whichReg, __u64 value);
-
-void __mf(void);
-void __mfa(void);
-void __synci(void);
-void __itcd(__s64 pa);
-void __itci(__s64 pa);
-void __itrd(__s64 whichTransReg, __s64 pa);
-void __itri(__s64 whichTransReg, __s64 pa);
-void __ptce(__s64 va);
-void __ptcl(__s64 va, __s64 pagesz);
-void __ptcg(__s64 va, __s64 pagesz);
-void __ptcga(__s64 va, __s64 pagesz);
-void __ptri(__s64 va, __s64 pagesz);
-void __ptrd(__s64 va, __s64 pagesz);
-void __invala (void);
-void __invala_gr(const int whichGeneralReg /* 0-127 */ );
-void __invala_fr(const int whichFloatReg /* 0-127 */ );
-void __nop(const int);
-void __fc(__u64 *addr);
-void __sum(int mask);
-void __rum(int mask);
-void __ssm(int mask);
-void __rsm(int mask);
-__u64 __thash(__s64);
-__u64 __ttag(__s64);
-__s64 __tpa(__s64);
-
-/* Intrinsics for implementing get/put_user macros */
-void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
-void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
-
-/* This intrinsic does not generate code, it creates a barrier across which
- * the compiler will not schedule data access instructions.
- */
-void __memory_barrier(void);
-
-void __isrlz(void);
-void __dsrlz(void);
-
-__u64 _m64_mux1(__u64 a, const int n);
-__u64 __thash(__u64);
-
-/* Lock and Atomic Operation Related Intrinsics */
-__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
-__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
-__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
-__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
-
-__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
-__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
-
-__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
-__s64 _m64_shrp(__s64 a, __s64 b, const int count);
-__s64 _m64_popcnt(__s64 a);
+#include <ia64intrin.h>
#define ia64_barrier() __memory_barrier()
@@ -122,15 +19,16 @@ __s64 _m64_popcnt(__s64 a);
#define ia64_getreg __getReg
#define ia64_setreg __setReg
-#define ia64_hint(x)
+#define ia64_hint __hint
+#define ia64_hint_pause __hint_pause
-#define ia64_mux1_brcst 0
-#define ia64_mux1_mix 8
-#define ia64_mux1_shuf 9
-#define ia64_mux1_alt 10
-#define ia64_mux1_rev 11
+#define ia64_mux1_brcst _m64_mux1_brcst
+#define ia64_mux1_mix _m64_mux1_mix
+#define ia64_mux1_shuf _m64_mux1_shuf
+#define ia64_mux1_alt _m64_mux1_alt
+#define ia64_mux1_rev _m64_mux1_rev
-#define ia64_mux1 _m64_mux1
+#define ia64_mux1(x,v) _m_to_int64(_m64_mux1(_m_from_int64(x), (v)))
#define ia64_popcnt _m64_popcnt
#define ia64_getf_exp __getf_exp
#define ia64_shrp _m64_shrp
@@ -158,7 +56,7 @@ __s64 _m64_popcnt(__s64 a);
#define ia64_stf8 __stf8
#define ia64_stf_spill __stf_spill
-#define ia64_mf __mf
+#define ia64_mf __mf
#define ia64_mfa __mfa
#define ia64_fetchadd4_acq __fetchadd4_acq
@@ -234,10 +132,10 @@ __s64 _m64_popcnt(__s64 a);
/* Values for lfhint in __lfetch and __lfetch_fault */
-#define ia64_lfhint_none 0
-#define ia64_lfhint_nt1 1
-#define ia64_lfhint_nt2 2
-#define ia64_lfhint_nta 3
+#define ia64_lfhint_none __lfhint_none
+#define ia64_lfhint_nt1 __lfhint_nt1
+#define ia64_lfhint_nt2 __lfhint_nt2
+#define ia64_lfhint_nta __lfhint_nta
#define ia64_lfetch __lfetch
#define ia64_lfetch_excl __lfetch_excl
@@ -254,4 +152,6 @@ do { \
} \
} while (0)
+#define __builtin_trap() __break(0);
+
#endif /* _ASM_IA64_INTEL_INTRIN_H */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index ca5ea994d68..c3e4ed8a3e1 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -20,6 +20,7 @@ struct scatterlist;
struct page;
struct mm_struct;
struct pci_bus;
+struct task_struct;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
@@ -34,6 +35,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
u8 size);
+typedef void ia64_mv_migrate_t(struct task_struct * task);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
@@ -85,6 +87,11 @@ machvec_noop_mm (struct mm_struct *mm)
{
}
+static inline void
+machvec_noop_task (struct task_struct *task)
+{
+}
+
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
@@ -146,6 +153,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_readw_relaxed ia64_mv.readw_relaxed
# define platform_readl_relaxed ia64_mv.readl_relaxed
# define platform_readq_relaxed ia64_mv.readq_relaxed
+# define platform_migrate ia64_mv.migrate
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
@@ -194,6 +202,7 @@ struct ia64_machine_vector {
ia64_mv_readw_relaxed_t *readw_relaxed;
ia64_mv_readl_relaxed_t *readl_relaxed;
ia64_mv_readq_relaxed_t *readq_relaxed;
+ ia64_mv_migrate_t *migrate;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
@@ -238,6 +247,7 @@ struct ia64_machine_vector {
platform_readw_relaxed, \
platform_readl_relaxed, \
platform_readq_relaxed, \
+ platform_migrate, \
}
extern struct ia64_machine_vector ia64_mv;
@@ -386,5 +396,8 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
#ifndef platform_readq_relaxed
# define platform_readq_relaxed __ia64_readq_relaxed
#endif
+#ifndef platform_migrate
+# define platform_migrate machvec_noop_task
+#endif
#endif /* _ASM_IA64_MACHVEC_H */
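
The machvec changes above add a per-platform migrate hook that falls back to machvec_noop_task when a platform does not provide one. A sketch, with an illustrative caller name, of how generic code uses it:

    /* sketch only: with a generic kernel this dispatches through
     * ia64_mv.migrate (e.g. sn_migrate on SN2); on platforms that define
     * nothing it collapses to machvec_noop_task */
    static void arch_notify_migration(struct task_struct *task)
    {
    	platform_migrate(task);
    }
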
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index 03d00faf03b..da1d43755af 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002-2003, 2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -66,6 +66,7 @@ extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
extern ia64_mv_dma_supported sn_dma_supported;
+extern ia64_mv_migrate_t sn_migrate;
/*
* This stuff has dual use!
@@ -115,6 +116,7 @@ extern ia64_mv_dma_supported sn_dma_supported;
#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
+#define platform_migrate sn_migrate
#include <asm/sn/io.h>
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index c7d9c9ed38b..bfbbb8da79c 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -131,6 +131,8 @@ struct ia64_mca_cpu {
/* Array of physical addresses of each CPU's MCA area. */
extern unsigned long __per_cpu_mca[NR_CPUS];
+extern int cpe_vector;
+extern int ia64_cpe_irq;
extern void ia64_mca_init(void);
extern void ia64_mca_cpu_init(void *);
extern void ia64_os_mca_dispatch(void);
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
index 458c1f7fbc1..5a3224f6af3 100644
--- a/include/asm-ia64/mutex.h
+++ b/include/asm-ia64/mutex.h
@@ -1,9 +1,92 @@
/*
- * Pull in the generic implementation for the mutex fastpath.
+ * ia64 implementation of the mutex fastpath.
*
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
+ *
+ */
+
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ int ret = ia64_fetchadd4_rel(count, 1);
+ if (unlikely(ret < 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
*/
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(cmpxchg_acq(count, 1, 0) == 1))
+ return 1;
+ return 0;
+}
-#include <asm-generic/mutex-dec.h>
+#endif
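
The comments above describe the contract of each fastpath helper; the sketch below shows the calling convention they imply. The slowpath stubs are placeholders, not the kernel's __mutex_*_slowpath functions:

    static void lock_slowpath(atomic_t *count)   { /* sleep until released */ }
    static void unlock_slowpath(atomic_t *count) { /* wake up one waiter   */ }

    static void example_lock(atomic_t *count)
    {
    	/* uncontended: count goes 1 -> 0; contended: it goes negative and
    	 * the slowpath callback runs */
    	__mutex_fastpath_lock(count, lock_slowpath);
    }

    static void example_unlock(atomic_t *count)
    {
    	/* a negative old count means there are waiters, so the slowpath
    	 * callback runs to wake one up */
    	__mutex_fastpath_unlock(count, unlock_slowpath);
    }
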
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 5e6362a786b..3ab27333dae 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -57,6 +57,8 @@
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index e2560c58384..c0f8144f234 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -314,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P))
+#define pte_mkhuge(pte) (__pte(pte_val(pte)))
/*
* Macro to a page protection value as "uncacheable". Note that "protection" is really a
@@ -505,9 +505,6 @@ extern struct page *zero_page_memmap_ptr;
#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
- unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
/*
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 23c8e1be191..128fefd8056 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -50,7 +50,8 @@
#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
- /* bit 5 is currently unused */
+#define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration
+ sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index 608168d713d..5e328ed5d01 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -158,8 +158,6 @@ struct k_sigaction {
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
-
#endif /* __KERNEL__ */
# endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h
index 2c32e4b77b5..1d9efe54166 100644
--- a/include/asm-ia64/sn/addrs.h
+++ b/include/asm-ia64/sn/addrs.h
@@ -283,5 +283,13 @@
#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
+/*
+ * Coretalk address breakdown
+ */
+#define CTALK_NASID_SHFT 40
+#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
+#define CTALK_CID_SHFT 38
+#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
+#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
#endif /* _ASM_IA64_SN_ADDRS_H */
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
index f40fd1a5510..2d78f4c5a45 100644
--- a/include/asm-ia64/sn/rw_mmr.h
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -3,15 +3,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
/*
- * This file contains macros used to access MMR registers via
- * uncached physical addresses.
+ * This file contains routines that access MMRs via uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
@@ -22,53 +21,8 @@
*/
-extern inline long
-pio_phys_read_mmr(volatile long *mmr)
-{
- long val;
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt;;"
- "srlz.i;;"
- "ld8.acq %0=[%1];;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- : "=r"(val)
- : "r"(mmr)
- : "r2");
- return val;
-}
-
-
-
-extern inline void
-pio_phys_write_mmr(volatile long *mmr, long val)
-{
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt;;"
- "srlz.i;;"
- "st8.rel [%0]=%1;;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- :: "r"(mmr), "r"(val)
- : "r2", "memory");
-}
-
-extern inline void
-pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
-{
- asm volatile
- ("mov r2=psr;;"
- "rsm psr.i | psr.dt | psr.ic;;"
- "cmp.ne p9,p0=%2,r0;"
- "srlz.i;;"
- "st8.rel [%0]=%1;"
- "(p9) st8.rel [%2]=%3;;"
- "mov psr.l=r2;;"
- "srlz.i;;"
- :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
- : "p9", "r2", "memory");
-}
+extern long pio_phys_read_mmr(volatile long *mmr);
+extern void pio_phys_write_mmr(volatile long *mmr, long val);
+extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/include/asm-ia64/sn/tioce.h b/include/asm-ia64/sn/tioce.h
index d4c990712ea..893468e1b41 100644
--- a/include/asm-ia64/sn/tioce.h
+++ b/include/asm-ia64/sn/tioce.h
@@ -11,7 +11,7 @@
/* CE ASIC part & mfgr information */
#define TIOCE_PART_NUM 0xCE00
-#define TIOCE_MFGR_NUM 0x36
+#define TIOCE_SRC_ID 0x01
#define TIOCE_REV_A 0x1
/* CE Virtual PPB Vendor/Device IDs */
@@ -20,7 +20,7 @@
/* CE Host Bridge Vendor/Device IDs */
#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9
-#define CE_HOST_BRIDGE_DEVICE_ID 0x4003
+#define CE_HOST_BRIDGE_DEVICE_ID 0x4001
#define TIOCE_NUM_M40_ATES 4096
@@ -463,6 +463,25 @@ typedef volatile struct tioce {
u64 ce_end_of_struct; /* 0x044400 */
} tioce_t;
+/* ce_lsiX_gb_cfg1 register bit masks & shifts */
+#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0
+#define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0)
+#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8
+#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8)
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12
+#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12)
+#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15
+#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15)
+#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16
+#define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16)
+#define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18
+#define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18)
+#define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19
+#define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19)
+#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20
+#define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20)
+#define CE_LSI_GB_CFG1_SLF_TS_SHFT 24
+#define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24)
/* ce_adm_int_mask/ce_adm_int_status register bit defines */
#define CE_ADM_INT_CE_ERROR_SHFT 0
@@ -592,6 +611,11 @@ typedef volatile struct tioce {
#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0)
#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4)
#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5)
+#define CE_URE_WRT_MRG_TIMER_SHFT 12
+#define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
+#define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \
+ CE_URE_WRT_MRG_TIMER_SHFT) & \
+ CE_URE_WRT_MRG_TIMER_MASK)
#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24)
#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32)
#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33)
@@ -653,8 +677,12 @@ typedef volatile struct tioce {
#define CE_URE_SI (0x1ULL << 0)
#define CE_URE_ELAL_SHFT 4
#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT)
+#define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \
+ CE_URE_ELAL_MASK)
#define CE_URE_ELAL1_SHFT 8
#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT)
+#define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \
+ CE_URE_ELAL1_MASK)
#define CE_URE_SCC (0x1ULL << 12)
#define CE_URE_PN1_SHFT 16
#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT)
@@ -675,8 +703,12 @@ typedef volatile struct tioce {
#define CE_URE_HPC (0x1ULL << 6)
#define CE_URE_SPLV_SHFT 7
#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT)
+#define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \
+ CE_URE_SPLV_MASK)
#define CE_URE_SPLS_SHFT 15
#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT)
+#define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \
+ CE_URE_SPLS_MASK)
#define CE_URE_PSN1_SHFT 19
#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT)
#define CE_URE_PSN2_SHFT 32
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index df7f5f4f3cd..aa3b8ace903 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
- /* see if kmalloc will give us cachline aligned memory by default */
- *base = kmalloc(size, flags);
- if (*base == NULL) {
- return NULL;
- }
- if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
- return *base;
- }
- kfree(*base);
-
- /* nope, we'll have to do it ourselves */
- *base = kmalloc(size + L1_CACHE_BYTES, flags);
- if (*base == NULL) {
- return NULL;
- }
- return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
/*
* Check to see if there is any channel activity to/from the specified
* partition.
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 06253871562..cd4233d66f1 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -244,6 +244,13 @@ extern void ia64_load_extra (struct task_struct *task);
__ia64_save_fpu((prev)->thread.fph); \
} \
__switch_to(prev, next, last); \
+ /* "next" in old context is "current" in new context */ \
+ if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
+ (task_cpu(current) != \
+ task_thread_info(current)->last_cpu))) { \
+ platform_migrate(current); \
+ task_thread_info(current)->last_cpu = task_cpu(current); \
+ } \
} while (0)
#else
# define switch_to(prev,next,last) __switch_to(prev, next, last)
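
The lines added to switch_to() above call the new platform_migrate hook whenever a task flagged IA64_THREAD_MIGRATION resumes on a CPU other than the one recorded in last_cpu. Restated as a stand-alone helper, purely as a sketch of the same logic:

    static inline void migration_sync_if_needed(struct task_struct *tsk)
    {
    	struct thread_info *ti = task_thread_info(tsk);

    	if (unlikely((tsk->thread.flags & IA64_THREAD_MIGRATION) &&
    		     task_cpu(tsk) != ti->last_cpu)) {
    		platform_migrate(tsk);		/* e.g. sn_migrate on SN2 */
    		ti->last_cpu = task_cpu(tsk);
    	}
    }
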
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 1d6518fe1f0..56394a2c705 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -26,16 +26,10 @@ struct thread_info {
struct exec_domain *exec_domain;/* execution domain */
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
+ __u32 last_cpu; /* Last CPU thread ran on */
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
- struct {
- int signo;
- int code;
- void __user *addr;
- unsigned long start_time;
- pid_t pid;
- } sigdelayed; /* Saved information for TIF_SIGDELAYED */
};
#define THREAD_SIZE KERNEL_STACK_SIZE
@@ -89,7 +83,6 @@ struct thread_info {
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
-#define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 17
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
@@ -101,13 +94,12 @@ struct thread_info {
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
/* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
+#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index e38931379a7..185ee15963a 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -468,11 +468,6 @@ extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
-#ifdef CONFIG_HUGETLB_PAGE
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- free_pgd_range(tlb, addr, end, floor, ceiling)
-#endif
-
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 3417dd71ab4..e28aaf28e4a 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -158,11 +158,4 @@ static inline void pte_free(struct page *pte)
#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
-/*
- * This establishes kernel virtual mappings (e.g., as a result of a
- * vmalloc call). Since s390-esame uses a separate kernel page table,
- * there is nothing to do here... :)
- */
-#define set_pgdir(addr,entry) do { } while(0)
-
#endif /* _S390_PGALLOC_H */
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index 678251ac1db..b29dd468817 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -167,22 +167,6 @@ static __inline__ void pmd_free(pmd_t *pmd)
extern int do_check_pgt_cache(int, int);
-static inline void set_pgdir(unsigned long address, pgd_t entry)
-{
- struct task_struct * p;
- pgd_t *pgd;
-
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (!p->mm)
- continue;
- *pgd_offset(p->mm,address) = entry;
- }
- read_unlock(&tasklist_lock);
- for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
- pgd[address >> PGDIR_SHIFT] = entry;
-}
-
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 715fd94cf57..a617d364d08 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
@@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
struct vm_area_struct;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 68d82ad6b17..d6f1019625a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,10 +20,7 @@ void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long)
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
-int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
-void free_huge_page(struct page *);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -39,18 +36,35 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
+void hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot);
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- do { } while (0)
+#endif
+
+#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define hugetlb_free_pgd_range free_pgd_range
+#else
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+ unsigned long end, unsigned long floor,
+ unsigned long ceiling);
#endif
#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len) \
- is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif
@@ -87,20 +101,17 @@ static inline unsigned long hugetlb_total_pages(void)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end) BUG()
-#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define is_aligned_hugepage_range(addr, len) 0
#define prepare_hugepage_range(addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
- do { } while (0)
-#define alloc_huge_page(vma, addr) ({ NULL; })
-#define free_huge_page(p) ({ (void)(p); BUG(); })
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
+#define hugetlb_change_protection(vma, address, end, newprot)
+
#ifndef HPAGE_MASK
#define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
#define HPAGE_SIZE PAGE_SIZE
@@ -128,6 +139,8 @@ struct hugetlbfs_sb_info {
struct hugetlbfs_inode_info {
struct shared_policy policy;
+ /* Protected by the (global) hugetlb_lock */
+ unsigned long prereserved_hpages;
struct inode vfs_inode;
};
@@ -144,6 +157,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
+int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
+ unsigned long atleast_hpages);
+void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
+ unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);
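
The new default prepare_hugepage_range() above only checks that addr and len are huge-page aligned. A small user-space demo of that check; HPAGE_SHIFT = 22 (4 MB pages) is an assumption for the demo, not taken from the patch:

    #include <stdio.h>
    #include <errno.h>

    #define HPAGE_SHIFT	22
    #define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
    #define HPAGE_MASK	(~(HPAGE_SIZE - 1))

    static int prepare_hugepage_range(unsigned long addr, unsigned long len)
    {
    	if (len & ~HPAGE_MASK)
    		return -EINVAL;
    	if (addr & ~HPAGE_MASK)
    		return -EINVAL;
    	return 0;
    }

    int main(void)
    {
    	/* aligned addr and len: accepted */
    	printf("%d\n", prepare_hugepage_range(0x40000000UL, 2 * HPAGE_SIZE));
    	/* misaligned addr: rejected with -EINVAL */
    	printf("%d\n", prepare_hugepage_range(0x40000000UL + 4096, HPAGE_SIZE));
    	return 0;
    }
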
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
new file mode 100644
index 00000000000..7d09962c3c0
--- /dev/null
+++ b/include/linux/migrate.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_MIGRATE_H
+#define _LINUX_MIGRATE_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#ifdef CONFIG_MIGRATION
+extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
+extern int putback_lru_pages(struct list_head *l);
+extern int migrate_page(struct page *, struct page *);
+extern void migrate_page_copy(struct page *, struct page *);
+extern int migrate_page_remove_references(struct page *, struct page *, int);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+ struct list_head *moved, struct list_head *failed);
+int migrate_pages_to(struct list_head *pagelist,
+ struct vm_area_struct *vma, int dest);
+extern int fail_migrate_page(struct page *, struct page *);
+
+extern int migrate_prep(void);
+
+#else
+
+static inline int isolate_lru_page(struct page *p, struct list_head *list)
+ { return -ENOSYS; }
+static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline int migrate_pages(struct list_head *l, struct list_head *t,
+ struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+
+static inline int migrate_prep(void) { return -ENOSYS; }
+
+/* Possible settings for the migrate_page() method in address_operations */
+#define migrate_page NULL
+#define fail_migrate_page NULL
+
+#endif /* CONFIG_MIGRATION */
+#endif /* _LINUX_MIGRATE_H */
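
The new header collects the page-migration entry points previously declared in swap.h. A sketch of how they are meant to fit together; the list handling and return-value checks here are assumptions for illustration, not code from mm/:

    static void example_migrate(struct page *page, struct list_head *new_pages)
    {
    	LIST_HEAD(pagelist);
    	LIST_HEAD(moved);
    	LIST_HEAD(failed);

    	migrate_prep();				/* flush per-cpu LRU caches */

    	if (isolate_lru_page(page, &pagelist))	/* pull the page off its LRU */
    		return;				/* could not isolate (assumed) */

    	migrate_pages(&pagelist, new_pages, &moved, &failed);

    	putback_lru_pages(&failed);		/* give failures back to the LRU */
    }
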
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb..6aa016f1d3a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,43 +286,34 @@ struct page {
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
*/
/*
* Drop a ref, return true if the logical refcount fell to zero (the page has
* no users)
*/
-#define put_page_testzero(p) \
- ({ \
- BUG_ON(atomic_read(&(p)->_count) == -1);\
- atomic_add_negative(-1, &(p)->_count); \
- })
+static inline int put_page_testzero(struct page *page)
+{
+ BUG_ON(atomic_read(&page->_count) == 0);
+ return atomic_dec_and_test(&page->_count);
+}
/*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
*/
-#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1)
-#define __put_page(p) atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+ return atomic_inc_not_zero(&page->_count);
+}
extern void FASTCALL(__page_cache_release(struct page *));
static inline int page_count(struct page *page)
{
- if (PageCompound(page))
+ if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
- return atomic_read(&page->_count) + 1;
+ return atomic_read(&page->_count);
}
static inline void get_page(struct page *page)
@@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
atomic_inc(&page->_count);
}
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+ atomic_set(&page->_count, 1);
+}
+
void put_page(struct page *page);
+void split_page(struct page *page, unsigned int order);
+
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
@@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);
int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
void drop_pagecache(void);
void drop_slab(void);
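
After this change a free page has _count == 0, and references are taken and dropped through the helpers above. A minimal sketch of the speculative-reference pattern they enable (illustrative, not taken from mm/):

    static int try_speculative_ref(struct page *page)
    {
    	/* refuse to resurrect a page whose count already hit zero */
    	return get_page_unless_zero(page);
    }

    static void release_ref(struct page *page)
    {
    	if (put_page_testzero(page)) {
    		/* we were the last user; the real code frees the page here */
    	}
    }
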
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8ac854f7f19..3b6723dfaff 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -32,7 +32,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
{
list_del(&page->lru);
if (PageActive(page)) {
- ClearPageActive(page);
+ __ClearPageActive(page);
zone->nr_active--;
} else {
zone->nr_inactive--;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d52999c4333..9ea629c02a4 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,8 +86,9 @@
* - The __xxx_page_state variants can be used safely when interrupts are
* disabled.
* - The __xxx_page_state variants can be used if the field is only
- * modified from process context, or only modified from interrupt context.
- * In this case, the field should be commented here.
+ * modified from process context and protected from preemption, or only
+ * modified from interrupt context. In this case, the field should be
+ * commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
@@ -239,22 +240,19 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
+#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags)
+#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags)
#define PageActive(page) test_bit(PG_active, &(page)->flags)
#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
-#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
-#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags)
#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
-#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
-#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags)
-#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags)
-#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)
+#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags)
+#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags)
#ifdef CONFIG_HIGHMEM
#define PageHighMem(page) is_highmem(page_zone(page))
@@ -329,8 +327,8 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
-#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
-#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
+#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags)
+#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
#ifdef CONFIG_SWAP
#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
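
The new double-underscore variants use non-atomic bit operations, so they are only valid while no other CPU can reach the page, e.g. while the allocator is still setting it up. A hedged sketch of that kind of call site (the function name is made up):

    static void init_compound_tail_pages(struct page *head, unsigned long nr)
    {
    	unsigned long i;

    	/* the pages are not yet visible to anyone else, so the non-atomic
    	 * __SetPageCompound() cannot race with other flag updates */
    	for (i = 1; i < nr; i++)
    		__SetPageCompound(head + i);
    }
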
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 0b2ba67ff13..b739ac1f7ca 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -11,8 +11,6 @@
#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_
-#include <linux/interrupt.h>
-
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@@ -95,6 +93,8 @@ struct rtc_pll_info {
#ifdef __KERNEL__
+#include <linux/interrupt.h>
+
typedef struct rtc_task {
void (*func)(void *private_data);
void *private_data;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8cf52939d0a..2b28c849d75 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* Poison objects */
-#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
@@ -118,7 +117,7 @@ extern void *kzalloc(size_t, gfp_t);
*/
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
- if (n != 0 && size > INT_MAX / n)
+ if (n != 0 && size > ULONG_MAX / n)
return NULL;
return kzalloc(n * size, flags);
}
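
The kcalloc() guard now bounds n * size against ULONG_MAX, matching the size_t argument of kzalloc(). A small user-space demo of the overflow check:

    #include <stdio.h>
    #include <stddef.h>
    #include <limits.h>

    static int would_overflow(size_t n, size_t size)
    {
    	/* true if n * size would wrap around */
    	return n != 0 && size > ULONG_MAX / n;
    }

    int main(void)
    {
    	printf("%d\n", would_overflow(4, 100));			/* 0: fine  */
    	printf("%d\n", would_overflow(ULONG_MAX / 2, 3));	/* 1: wraps */
    	return 0;
    }
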
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 44153fdf73f..d699a16b0cb 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-extern int smp_call_function (void (*func) (void *info), void *info,
- int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
/*
* Call a function on all processors
*/
-static inline int on_each_cpu(void (*func) (void *info), void *info,
- int retry, int wait)
-{
- int ret = 0;
-
- preempt_disable();
- ret = smp_call_function(func, info, retry, wait);
- func(info);
- preempt_enable();
- return ret;
-}
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
#define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
-#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
+#define on_each_cpu(func,info,retry,wait) \
+ ({ \
+ local_irq_disable(); \
+ func(info); \
+ local_irq_enable(); \
+ 0; \
+ })
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
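
With this change on_each_cpu() runs func with local interrupts disabled on the calling CPU in both the SMP and UP builds, matching how the function runs on remote CPUs. A sketch of a caller relying on that guarantee (names are illustrative):

    static void reset_percpu_counters(void *unused)
    {
    	/* may safely touch state also used from IRQ context, because
    	 * on_each_cpu() now guarantees interrupts are off here */
    }

    static void reset_all_cpus(void)
    {
    	on_each_cpu(reset_percpu_counters, NULL, 0, 1);	/* retry=0, wait=1 */
    }
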
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d572b19afb7..12415dd9445 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,9 +172,24 @@ extern int rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
/* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, gfp_t);
-extern int shrink_all_memory(int);
+extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
+extern int remove_mapping(struct address_space *mapping, struct page *page);
+
+/* possible outcome of pageout() */
+typedef enum {
+ /* failed to write page out, page is locked */
+ PAGE_KEEP,
+ /* move page to the active list, page is locked */
+ PAGE_ACTIVATE,
+ /* page has been sent to the disk successfully, page is unlocked */
+ PAGE_SUCCESS,
+ /* page is clean and locked */
+ PAGE_CLEAN,
+} pageout_t;
+
+extern pageout_t pageout(struct page *page, struct address_space *mapping);
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
@@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
-#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p);
-extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed);
-extern int fail_migrate_page(struct page *, struct page *);
-#else
-static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-#define fail_migrate_page NULL
-#endif
-
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
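
The new pageout_t values describe what pageout() did with a page. A sketch of how a reclaim loop might branch on the result (illustrative; the real consumer is the reclaim code in mm/vmscan.c):

    static void handle_pageout(struct page *page, struct address_space *mapping)
    {
    	switch (pageout(page, mapping)) {
    	case PAGE_KEEP:		/* write failed; keep the page, still locked */
    		break;
    	case PAGE_ACTIVATE:	/* move back to the active list, still locked */
    		break;
    	case PAGE_SUCCESS:	/* write-out started; page comes back unlocked */
    		break;
    	case PAGE_CLEAN:	/* nothing to write; page is clean and locked */
    		break;
    	}
    }
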