Diffstat (limited to 'include'): 254 files changed, 6986 insertions(+), 2801 deletions(-)
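A large share of the touched headers comes from the unaligned-access rework visible throughout this diff: include/asm-generic/unaligned.h is deleted, and each architecture's <asm/unaligned.h> now composes get_unaligned()/put_unaligned() from the new <linux/unaligned/> helpers (struct, byteshift, memmove or access_ok variants, in little- and big-endian flavours). The fragment below is only an illustrative, userspace-compilable sketch of the two most common styles; the names are assumptions for the example, not the kernel headers themselves.

/*
 * Hedged sketch of the two helper styles the new <linux/unaligned/*>
 * headers are built around; all identifiers here are illustrative only.
 */
#include <stdint.h>
#include <string.h>

/* "le_struct" style: a packed struct lets the compiler emit whatever
 * unaligned-capable load the CPU offers (assumes a little-endian host). */
struct una_u32 { uint32_t x; } __attribute__((packed));

static inline uint32_t sketch_get_unaligned_le32(const void *p)
{
	return ((const struct una_u32 *)p)->x;
}

/* "be_byteshift" style: assemble the value byte by byte, which is correct
 * on any host regardless of endianness or alignment rules. */
static inline uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

static inline void sketch_put_unaligned_le32(uint32_t val, void *p)
{
	uint8_t b[4] = { val, val >> 8, val >> 16, val >> 24 };
	memcpy(p, b, sizeof(b));	/* byteshift-style store */
}

Each per-architecture hunk below then just selects a flavour, e.g. "#define get_unaligned __get_unaligned_le" on alpha and "#define get_unaligned __get_unaligned_be" on frv.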
diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h
index 39a3e2a5017..695a5ee4b5d 100644
--- a/include/asm-alpha/bug.h
+++ b/include/asm-alpha/bug.h
@@ -1,14 +1,24 @@
 #ifndef _ALPHA_BUG_H
 #define _ALPHA_BUG_H
 
+#include <linux/linkage.h>
+
 #ifdef CONFIG_BUG
 #include <asm/pal.h>
 
 /* ??? Would be nice to use .gprel32 here, but we can't be sure that the
    function loaded the GP, so this could fail in modules. */
-#define BUG() \
-	__asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
-			     : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
+static inline void ATTRIB_NORET __BUG(const char *file, int line)
+{
+	__asm__ __volatile__(
+		"call_pal %0 # bugchk\n\t"
+		".long %1\n\t.8byte %2"
+		: : "i" (PAL_bugchk), "i"(line), "i"(file));
+	for ( ; ; )
+		;
+}
+
+#define BUG() __BUG(__FILE__, __LINE__)
 
 #define HAVE_ARCH_BUG
 #endif
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
index 7af2b8d2548..58e958fc7f1 100644
--- a/include/asm-alpha/byteorder.h
+++ b/include/asm-alpha/byteorder.h
@@ -7,7 +7,7 @@
 
 #ifdef __GNUC__
 
-static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch__swab32(__u32 x)
 {
 	/*
 	 * Unfortunately, we can't use the 6 instruction sequence
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 99037b03235..05ce5fba43e 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -268,6 +268,7 @@ extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
 extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
 extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
+extern inline int pte_special(pte_t pte)	{ return 0; }
 
 extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
 extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
@@ -275,6 +276,7 @@ extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); ret
 extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
 extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
 extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
+extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
index a1d72846f61..3787c60aed3 100644
--- a/include/asm-alpha/unaligned.h
+++ b/include/asm-alpha/unaligned.h
@@ -1,6 +1,11 @@
-#ifndef __ALPHA_UNALIGNED_H
-#define __ALPHA_UNALIGNED_H
+#ifndef _ASM_ALPHA_UNALIGNED_H
+#define _ASM_ALPHA_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif
+#define get_unaligned	__get_unaligned_le
+#define put_unaligned	__put_unaligned_le
+
+#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h
index 98b10bcf9f1..b14cbda01dc 100644
--- a/include/asm-arm/arch-sa1100/ide.h
+++ b/include/asm-arm/arch-sa1100/ide.h
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
 
 	memset(hw, 0, sizeof(*hw));
 
-	for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-		hw->io_ports[i] = reg;
+	for (i = 0; i <= 7;
i++) { + hw->io_ports_array[i] = reg; reg += regincr; } - hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; + hw->io_ports.ctl_addr = ctrl_port; if (irq) *irq = 0; diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h index 5e0182485d8..5571c13c3f3 100644 --- a/include/asm-arm/pgtable.h +++ b/include/asm-arm/pgtable.h @@ -260,6 +260,7 @@ extern struct page *empty_zero_page; #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_special(pte) (0) /* * The following only works if pte_present() is not true. @@ -280,6 +281,8 @@ PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + /* * Mark the prot value as uncacheable and unbufferable. */ diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h index 5db03cf3b90..44593a89490 100644 --- a/include/asm-arm/unaligned.h +++ b/include/asm-arm/unaligned.h @@ -1,171 +1,9 @@ -#ifndef __ASM_ARM_UNALIGNED_H -#define __ASM_ARM_UNALIGNED_H +#ifndef _ASM_ARM_UNALIGNED_H +#define _ASM_ARM_UNALIGNED_H -#include <asm/types.h> - -extern int __bug_unaligned_x(const void *ptr); - -/* - * What is the most efficient way of loading/storing an unaligned value? - * - * That is the subject of this file. Efficiency here is defined as - * minimum code size with minimum register usage for the common cases. - * It is currently not believed that long longs are common, so we - * trade efficiency for the chars, shorts and longs against the long - * longs. - * - * Current stats with gcc 2.7.2.2 for these functions: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 3 7 3 - * 8 20 6 16 6 - * - * gcc 2.95.1 seems to code differently: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 4 7 4 - * 8 19 8 15 6 - * - * which may or may not be more efficient (depending upon whether - * you can afford the extra registers). Hopefully the gcc 2.95 - * is inteligent enough to decide if it is better to use the - * extra register, but evidence so far seems to suggest otherwise. 
- * - * Unfortunately, gcc is not able to optimise the high word - * out of long long >> 32, or the low word from long long << 32 - */ - -#define __get_unaligned_2_le(__p) \ - (unsigned int)(__p[0] | __p[1] << 8) - -#define __get_unaligned_2_be(__p) \ - (unsigned int)(__p[0] << 8 | __p[1]) - -#define __get_unaligned_4_le(__p) \ - (unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) - -#define __get_unaligned_4_be(__p) \ - (unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3]) - -#define __get_unaligned_8_le(__p) \ - ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 | \ - __get_unaligned_4_le(__p)) - -#define __get_unaligned_8_be(__p) \ - ((unsigned long long)__get_unaligned_4_be(__p) << 32 | \ - __get_unaligned_4_be((__p+4))) - -#define __get_unaligned_le(ptr) \ - ((__force typeof(*(ptr)))({ \ - const __u8 *__p = (const __u8 *)(ptr); \ - __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \ - __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p), \ - __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p), \ - __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p), \ - (void)__bug_unaligned_x(__p))))); \ - })) - -#define __get_unaligned_be(ptr) \ - ((__force typeof(*(ptr)))({ \ - const __u8 *__p = (const __u8 *)(ptr); \ - __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \ - __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p), \ - __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p), \ - __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p), \ - (void)__bug_unaligned_x(__p))))); \ - })) - - -static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p) -{ - *__p++ = __v; - *__p++ = __v >> 8; -} - -static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p) -{ - *__p++ = __v >> 8; - *__p++ = __v; -} - -static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p) -{ - __put_unaligned_2_le(__v >> 16, __p + 2); - __put_unaligned_2_le(__v, __p); -} - -static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p) -{ - __put_unaligned_2_be(__v >> 16, __p); - __put_unaligned_2_be(__v, __p + 2); -} - -static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p) -{ - /* - * tradeoff: 8 bytes of stack for all unaligned puts (2 - * instructions), or an extra register in the long long - * case - go for the extra register. - */ - __put_unaligned_4_le(__v >> 32, __p+4); - __put_unaligned_4_le(__v, __p); -} - -static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p) -{ - /* - * tradeoff: 8 bytes of stack for all unaligned puts (2 - * instructions), or an extra register in the long long - * case - go for the extra register. - */ - __put_unaligned_4_be(__v >> 32, __p); - __put_unaligned_4_be(__v, __p+4); -} - -/* - * Try to store an unaligned value as efficiently as possible. 
- */ -#define __put_unaligned_le(val,ptr) \ - ({ \ - (void)sizeof(*(ptr) = (val)); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(ptr) = (val); \ - break; \ - case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr)); \ - break; \ - case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr)); \ - break; \ - case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \ - break; \ - default: __bug_unaligned_x(ptr); \ - break; \ - } \ - (void) 0; \ - }) - -#define __put_unaligned_be(val,ptr) \ - ({ \ - (void)sizeof(*(ptr) = (val)); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(ptr) = (val); \ - break; \ - case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr)); \ - break; \ - case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr)); \ - break; \ - case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \ - break; \ - default: __bug_unaligned_x(ptr); \ - break; \ - } \ - (void) 0; \ - }) +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/be_byteshift.h> +#include <linux/unaligned/generic.h> /* * Select endianness @@ -178,4 +16,4 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _ #define put_unaligned __put_unaligned_be #endif -#endif +#endif /* _ASM_ARM_UNALIGNED_H */ diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h index 3ae7b548fce..c0e5e29417d 100644 --- a/include/asm-avr32/pgtable.h +++ b/include/asm-avr32/pgtable.h @@ -212,6 +212,10 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) +{ + return 0; +} /* * The following only work if pte_present() is not true. @@ -252,6 +256,10 @@ static inline pte_t pte_mkyoung(pte_t pte) set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } +static inline pte_t pte_mkspecial(pte_t pte) +{ + return pte; +} #define pmd_none(x) (!pmd_val(x)) #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h index 36f5fd43054..04187729047 100644 --- a/include/asm-avr32/unaligned.h +++ b/include/asm-avr32/unaligned.h @@ -1,5 +1,5 @@ -#ifndef __ASM_AVR32_UNALIGNED_H -#define __ASM_AVR32_UNALIGNED_H +#ifndef _ASM_AVR32_UNALIGNED_H +#define _ASM_AVR32_UNALIGNED_H /* * AVR32 can handle some unaligned accesses, depending on the @@ -11,6 +11,11 @@ * optimize word loads in general. 
*/ -#include <asm-generic/unaligned.h> +#include <linux/unaligned/be_struct.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> -#endif /* __ASM_AVR32_UNALIGNED_H */ +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be + +#endif /* _ASM_AVR32_UNALIGNED_H */ diff --git a/include/asm-blackfin/unaligned.h b/include/asm-blackfin/unaligned.h index 10081dc241e..fd8a1d63494 100644 --- a/include/asm-blackfin/unaligned.h +++ b/include/asm-blackfin/unaligned.h @@ -1,6 +1,11 @@ -#ifndef __BFIN_UNALIGNED_H -#define __BFIN_UNALIGNED_H +#ifndef _ASM_BLACKFIN_UNALIGNED_H +#define _ASM_BLACKFIN_UNALIGNED_H -#include <asm-generic/unaligned.h> +#include <linux/unaligned/le_struct.h> +#include <linux/unaligned/be_byteshift.h> +#include <linux/unaligned/generic.h> -#endif /* __BFIN_UNALIGNED_H */ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le + +#endif /* _ASM_BLACKFIN_UNALIGNED_H */ diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h index ea34e0d0a38..5366e623932 100644 --- a/include/asm-cris/arch-v10/ide.h +++ b/include/asm-cris/arch-v10/ide.h @@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u int i; /* fill in ports for ATA addresses 0 to 7 */ - - for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { - hw->io_ports[i] = data_port | + for (i = 0; i <= 7; i++) { + hw->io_ports_array[i] = data_port | IO_FIELD(R_ATA_CTRL_DATA, addr, i) | IO_STATE(R_ATA_CTRL_DATA, cs0, active); } /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */ - - hw->io_ports[IDE_CONTROL_OFFSET] = data_port | + hw->io_ports.ctl_addr = data_port | IO_FIELD(R_ATA_CTRL_DATA, addr, 6) | IO_STATE(R_ATA_CTRL_DATA, cs1, active); /* whats this for ? */ - - hw->io_ports[IDE_IRQ_OFFSET] = 0; + hw->io_ports.irq_addr = 0; } static inline void ide_init_default_hwifs(void) diff --git a/include/asm-cris/pgtable.h b/include/asm-cris/pgtable.h index a2607575681..829e7a7d9fb 100644 --- a/include/asm-cris/pgtable.h +++ b/include/asm-cris/pgtable.h @@ -115,6 +115,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WR static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { @@ -162,6 +163,7 @@ static inline pte_t pte_mkyoung(pte_t pte) } return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * Conversion functions: convert a page and protection to a page entry, @@ -229,7 +231,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) /* to find an entry in a page-table-directory */ -static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address) +static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address) { return mm->pgd + pgd_index(address); } diff --git a/include/asm-cris/unaligned.h b/include/asm-cris/unaligned.h index 7fbbb399f6f..7b3f3fec567 100644 --- a/include/asm-cris/unaligned.h +++ b/include/asm-cris/unaligned.h @@ -1,16 +1,13 @@ -#ifndef __CRIS_UNALIGNED_H -#define __CRIS_UNALIGNED_H +#ifndef _ASM_CRIS_UNALIGNED_H +#define _ASM_CRIS_UNALIGNED_H /* * CRIS can do unaligned accesses itself. 
- * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#define get_unaligned(ptr) (*(ptr)) +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) - -#endif +#endif /* _ASM_CRIS_UNALIGNED_H */ diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h index 4e219046fe4..83c51aba534 100644 --- a/include/asm-frv/pgtable.h +++ b/include/asm-frv/pgtable.h @@ -380,6 +380,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address) static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); } +static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; } @@ -387,6 +388,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h index dc8e9c9bf6b..64ccc736f2d 100644 --- a/include/asm-frv/unaligned.h +++ b/include/asm-frv/unaligned.h @@ -9,194 +9,14 @@ * 2 of the License, or (at your option) any later version. */ -#ifndef _ASM_UNALIGNED_H -#define _ASM_UNALIGNED_H +#ifndef _ASM_FRV_UNALIGNED_H +#define _ASM_FRV_UNALIGNED_H +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/be_byteshift.h> +#include <linux/unaligned/generic.h> -/* - * Unaligned accesses on uClinux can't be performed in a fault handler - the - * CPU detects them as imprecise exceptions making this impossible. - * - * With the FR451, however, they are precise, and so we used to fix them up in - * the memory access fault handler. However, instruction bundling make this - * impractical. So, now we fall back to using memcpy. - */ -#ifdef CONFIG_MMU - -/* - * The asm statement in the macros below is a way to get GCC to copy a - * value from one variable to another without having any clue it's - * actually doing so, so that it won't have any idea that the values - * in the two variables are related. 
- */ - -#define get_unaligned(ptr) ({ \ - typeof((*(ptr))) __x; \ - void *__ptrcopy; \ - asm("" : "=r" (__ptrcopy) : "0" (ptr)); \ - memcpy(&__x, __ptrcopy, sizeof(*(ptr))); \ - __x; \ -}) - -#define put_unaligned(val, ptr) ({ \ - typeof((*(ptr))) __x = (val); \ - void *__ptrcopy; \ - asm("" : "=r" (__ptrcopy) : "0" (ptr)); \ - memcpy(__ptrcopy, &__x, sizeof(*(ptr))); \ -}) - -extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0); - -#else - -#define get_unaligned(ptr) \ -({ \ - typeof(*(ptr)) x; \ - const char *__p = (const char *) (ptr); \ - \ - switch (sizeof(x)) { \ - case 1: \ - x = *(ptr); \ - break; \ - case 2: \ - { \ - uint8_t a; \ - asm(" ldub%I2 %M2,%0 \n" \ - " ldub%I3.p %M3,%1 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%1,%0 \n" \ - : "=&r"(x), "=&r"(a) \ - : "m"(__p[0]), "m"(__p[1]) \ - ); \ - break; \ - } \ - \ - case 4: \ - { \ - uint8_t a; \ - asm(" ldub%I2 %M2,%0 \n" \ - " ldub%I3.p %M3,%1 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%1,%0 \n" \ - " ldub%I4.p %M4,%1 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%1,%0 \n" \ - " ldub%I5.p %M5,%1 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%1,%0 \n" \ - : "=&r"(x), "=&r"(a) \ - : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]) \ - ); \ - break; \ - } \ - \ - case 8: \ - { \ - union { uint64_t x; u32 y[2]; } z; \ - uint8_t a; \ - asm(" ldub%I3 %M3,%0 \n" \ - " ldub%I4.p %M4,%2 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%2,%0 \n" \ - " ldub%I5.p %M5,%2 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%2,%0 \n" \ - " ldub%I6.p %M6,%2 \n" \ - " slli %0,#8,%0 \n" \ - " or %0,%2,%0 \n" \ - " ldub%I7 %M7,%1 \n" \ - " ldub%I8.p %M8,%2 \n" \ - " slli %1,#8,%1 \n" \ - " or %1,%2,%1 \n" \ - " ldub%I9.p %M9,%2 \n" \ - " slli %1,#8,%1 \n" \ - " or %1,%2,%1 \n" \ - " ldub%I10.p %M10,%2 \n" \ - " slli %1,#8,%1 \n" \ - " or %1,%2,%1 \n" \ - : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a) \ - : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]), \ - "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7]) \ - ); \ - x = z.x; \ - break; \ - } \ - \ - default: \ - x = 0; \ - BUG(); \ - break; \ - } \ - \ - x; \ -}) - -#define put_unaligned(val, ptr) \ -do { \ - char *__p = (char *) (ptr); \ - int x; \ - \ - switch (sizeof(*ptr)) { \ - case 2: \ - { \ - asm(" stb%I1.p %0,%M1 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I2 %0,%M2 \n" \ - : "=r"(x), "=m"(__p[1]), "=m"(__p[0]) \ - : "0"(val) \ - ); \ - break; \ - } \ - \ - case 4: \ - { \ - asm(" stb%I1.p %0,%M1 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I2.p %0,%M2 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I3.p %0,%M3 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I4 %0,%M4 \n" \ - : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0]) \ - : "0"(val) \ - ); \ - break; \ - } \ - \ - case 8: \ - { \ - uint32_t __high, __low; \ - __high = (uint64_t)val >> 32; \ - __low = val & 0xffffffff; \ - asm(" stb%I2.p %0,%M2 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I3.p %0,%M3 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I4.p %0,%M4 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I5.p %0,%M5 \n" \ - " srli %0,#8,%0 \n" \ - " stb%I6.p %1,%M6 \n" \ - " srli %1,#8,%1 \n" \ - " stb%I7.p %1,%M7 \n" \ - " srli %1,#8,%1 \n" \ - " stb%I8.p %1,%M8 \n" \ - " srli %1,#8,%1 \n" \ - " stb%I9 %1,%M9 \n" \ - : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), \ - "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), \ - "=m"(__p[1]), "=m"(__p[0]) \ - : "0"(__low), "1"(__high) \ - ); \ - break; \ - } \ - \ - default: \ - *(ptr) = (val); \ - break; \ - } \ -} while(0) - -#endif +#define get_unaligned __get_unaligned_be +#define put_unaligned 
__put_unaligned_be -#endif +#endif /* _ASM_FRV_UNALIGNED_H */ diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index f29a502f4a6..ecf675a59d2 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -16,7 +16,14 @@ #define ARCH_NR_GPIOS 256 #endif +static inline int gpio_is_valid(int number) +{ + /* only some non-negative numbers are valid */ + return ((unsigned)number) < ARCH_NR_GPIOS; +} + struct seq_file; +struct module; /** * struct gpio_chip - abstract a GPIO controller @@ -48,6 +55,7 @@ struct seq_file; */ struct gpio_chip { char *label; + struct module *owner; int (*direction_input)(struct gpio_chip *chip, unsigned offset); @@ -66,6 +74,7 @@ struct gpio_chip { extern const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset); +extern int __init __must_check gpiochip_reserve(int start, int ngpio); /* add/remove chips */ extern int gpiochip_add(struct gpio_chip *chip); @@ -97,6 +106,12 @@ extern int __gpio_cansleep(unsigned gpio); #else +static inline int gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + /* platforms that don't directly support access to GPIOs through I2C, SPI, * or other blocking infrastructure can use these wrappers. */ diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h index cd027298beb..86418138557 100644 --- a/include/asm-generic/ioctl.h +++ b/include/asm-generic/ioctl.h @@ -21,8 +21,19 @@ */ #define _IOC_NRBITS 8 #define _IOC_TYPEBITS 8 -#define _IOC_SIZEBITS 14 -#define _IOC_DIRBITS 2 + +/* + * Let any architecture override either of the following before + * including this file. + */ + +#ifndef _IOC_SIZEBITS +# define _IOC_SIZEBITS 14 +#endif + +#ifndef _IOC_DIRBITS +# define _IOC_DIRBITS 2 +#endif #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) @@ -35,11 +46,21 @@ #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) /* - * Direction bits. + * Direction bits, which any architecture can choose to override + * before including this file. */ -#define _IOC_NONE 0U -#define _IOC_WRITE 1U -#define _IOC_READ 2U + +#ifndef _IOC_NONE +# define _IOC_NONE 0U +#endif + +#ifndef _IOC_WRITE +# define _IOC_WRITE 1U +#endif + +#ifndef _IOC_READ +# define _IOC_READ 2U +#endif #define _IOC(dir,type,nr,size) \ (((dir) << _IOC_DIRSHIFT) | \ diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h deleted file mode 100644 index 2fe1b2e67f0..00000000000 --- a/include/asm-generic/unaligned.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef _ASM_GENERIC_UNALIGNED_H_ -#define _ASM_GENERIC_UNALIGNED_H_ - -/* - * For the benefit of those who are trying to port Linux to another - * architecture, here are some C-language equivalents. - * - * This is based almost entirely upon Richard Henderson's - * asm-alpha/unaligned.h implementation. Some comments were - * taken from David Mosberger's asm-ia64/unaligned.h header. - */ - -#include <linux/types.h> - -/* - * The main single-value unaligned transfer routines. - */ -#define get_unaligned(ptr) \ - __get_unaligned((ptr), sizeof(*(ptr))) -#define put_unaligned(x,ptr) \ - ((void)sizeof(*(ptr)=(x)),\ - __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr)))) - -/* - * This function doesn't actually exist. The idea is that when - * someone uses the macros below with an unsupported size (datatype), - * the linker will alert us to the problem via an unresolved reference - * error. 
- */ -extern void bad_unaligned_access_length(void) __attribute__((noreturn)); - -struct __una_u64 { __u64 x __attribute__((packed)); }; -struct __una_u32 { __u32 x __attribute__((packed)); }; -struct __una_u16 { __u16 x __attribute__((packed)); }; - -/* - * Elemental unaligned loads - */ - -static inline __u64 __uldq(const __u64 *addr) -{ - const struct __una_u64 *ptr = (const struct __una_u64 *) addr; - return ptr->x; -} - -static inline __u32 __uldl(const __u32 *addr) -{ - const struct __una_u32 *ptr = (const struct __una_u32 *) addr; - return ptr->x; -} - -static inline __u16 __uldw(const __u16 *addr) -{ - const struct __una_u16 *ptr = (const struct __una_u16 *) addr; - return ptr->x; -} - -/* - * Elemental unaligned stores - */ - -static inline void __ustq(__u64 val, __u64 *addr) -{ - struct __una_u64 *ptr = (struct __una_u64 *) addr; - ptr->x = val; -} - -static inline void __ustl(__u32 val, __u32 *addr) -{ - struct __una_u32 *ptr = (struct __una_u32 *) addr; - ptr->x = val; -} - -static inline void __ustw(__u16 val, __u16 *addr) -{ - struct __una_u16 *ptr = (struct __una_u16 *) addr; - ptr->x = val; -} - -#define __get_unaligned(ptr, size) ({ \ - const void *__gu_p = ptr; \ - __u64 __val; \ - switch (size) { \ - case 1: \ - __val = *(const __u8 *)__gu_p; \ - break; \ - case 2: \ - __val = __uldw(__gu_p); \ - break; \ - case 4: \ - __val = __uldl(__gu_p); \ - break; \ - case 8: \ - __val = __uldq(__gu_p); \ - break; \ - default: \ - bad_unaligned_access_length(); \ - }; \ - (__force __typeof__(*(ptr)))__val; \ -}) - -#define __put_unaligned(val, ptr, size) \ -({ \ - void *__gu_p = ptr; \ - switch (size) { \ - case 1: \ - *(__u8 *)__gu_p = (__force __u8)val; \ - break; \ - case 2: \ - __ustw((__force __u16)val, __gu_p); \ - break; \ - case 4: \ - __ustl((__force __u32)val, __gu_p); \ - break; \ - case 8: \ - __ustq(val, __gu_p); \ - break; \ - default: \ - bad_unaligned_access_length(); \ - }; \ - (void)0; \ -}) - -#endif /* _ASM_GENERIC_UNALIGNED_H */ diff --git a/include/asm-h8300/unaligned.h b/include/asm-h8300/unaligned.h index ffb67f47207..b8d06c70c2d 100644 --- a/include/asm-h8300/unaligned.h +++ b/include/asm-h8300/unaligned.h @@ -1,15 +1,11 @@ -#ifndef __H8300_UNALIGNED_H -#define __H8300_UNALIGNED_H +#ifndef _ASM_H8300_UNALIGNED_H +#define _ASM_H8300_UNALIGNED_H +#include <linux/unaligned/be_memmove.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> -/* Use memmove here, so gcc does not insert a __builtin_memcpy. 
*/ +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be -#define get_unaligned(ptr) \ - ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) - -#define put_unaligned(val, ptr) \ - ({ __typeof__(*(ptr)) __tmp = (val); \ - memmove((ptr), &__tmp, sizeof(*(ptr))); \ - (void)0; }) - -#endif +#endif /* _ASM_H8300_UNALIGNED_H */ diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h index f1735a22d0e..9f0df9bd46b 100644 --- a/include/asm-ia64/dma-mapping.h +++ b/include/asm-ia64/dma-mapping.h @@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, { dma_free_coherent(dev, size, cpu_addr, dma_handle); } -#define dma_map_single platform_dma_map_single -#define dma_map_sg platform_dma_map_sg -#define dma_unmap_single platform_dma_unmap_single -#define dma_unmap_sg platform_dma_unmap_sg +#define dma_map_single_attrs platform_dma_map_single_attrs +static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, + size_t size, int dir) +{ + return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); +} +#define dma_map_sg_attrs platform_dma_map_sg_attrs +static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, int dir) +{ + return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); +} +#define dma_unmap_single_attrs platform_dma_unmap_single_attrs +static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, + size_t size, int dir) +{ + return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); +} +#define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, int dir) +{ + return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); +} #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu #define dma_sync_single_for_device platform_dma_sync_single_for_device diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h index de2ed2cbdd8..2fe292c275f 100644 --- a/include/asm-ia64/gcc_intrin.h +++ b/include/asm-ia64/gcc_intrin.h @@ -21,6 +21,10 @@ #define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum)) +#define ia64_flushrs() asm volatile ("flushrs;;":::"memory") + +#define ia64_loadrs() asm volatile ("loadrs;;":::"memory") + extern void ia64_bad_param_for_setreg (void); extern void ia64_bad_param_for_getreg (void); @@ -517,6 +521,14 @@ do { \ #define ia64_ptrd(addr, size) \ asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") +#define ia64_ttag(addr) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ +}) + + /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ #define ia64_lfhint_none 0 diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h new file mode 100644 index 00000000000..f28a9701f1c --- /dev/null +++ b/include/asm-ia64/hugetlb.h @@ -0,0 +1,79 @@ +#ifndef _ASM_IA64_HUGETLB_H +#define _ASM_IA64_HUGETLB_H + +#include <asm/page.h> + + +void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling); + +int prepare_hugepage_range(unsigned long addr, unsigned long len); + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + return (REGION_NUMBER(addr) == RGN_HPAGE || + REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE); +} + 
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* _ASM_IA64_HUGETLB_H */ diff --git a/include/asm-ia64/kvm.h b/include/asm-ia64/kvm.h index 030d29b4b26..eb2d3559d08 100644 --- a/include/asm-ia64/kvm.h +++ b/include/asm-ia64/kvm.h @@ -1,6 +1,205 @@ -#ifndef __LINUX_KVM_IA64_H -#define __LINUX_KVM_IA64_H +#ifndef __ASM_IA64_KVM_H +#define __ASM_IA64_KVM_H -/* ia64 does not support KVM */ +/* + * asm-ia64/kvm.h: kvm structure definitions for ia64 + * + * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + */ + +#include <asm/types.h> +#include <asm/fpu.h> + +#include <linux/ioctl.h> + +/* Architectural interrupt line count. 
*/ +#define KVM_NR_INTERRUPTS 256 + +#define KVM_IOAPIC_NUM_PINS 24 + +struct kvm_ioapic_state { + __u64 base_address; + __u32 ioregsel; + __u32 id; + __u32 irr; + __u32 pad; + union { + __u64 bits; + struct { + __u8 vector; + __u8 delivery_mode:3; + __u8 dest_mode:1; + __u8 delivery_status:1; + __u8 polarity:1; + __u8 remote_irr:1; + __u8 trig_mode:1; + __u8 mask:1; + __u8 reserve:7; + __u8 reserved[4]; + __u8 dest_id; + } fields; + } redirtbl[KVM_IOAPIC_NUM_PINS]; +}; + +#define KVM_IRQCHIP_PIC_MASTER 0 +#define KVM_IRQCHIP_PIC_SLAVE 1 +#define KVM_IRQCHIP_IOAPIC 2 + +#define KVM_CONTEXT_SIZE 8*1024 + +union context { + /* 8K size */ + char dummy[KVM_CONTEXT_SIZE]; + struct { + unsigned long psr; + unsigned long pr; + unsigned long caller_unat; + unsigned long pad; + unsigned long gr[32]; + unsigned long ar[128]; + unsigned long br[8]; + unsigned long cr[128]; + unsigned long rr[8]; + unsigned long ibr[8]; + unsigned long dbr[8]; + unsigned long pkr[8]; + struct ia64_fpreg fr[128]; + }; +}; + +struct thash_data { + union { + struct { + unsigned long p : 1; /* 0 */ + unsigned long rv1 : 1; /* 1 */ + unsigned long ma : 3; /* 2-4 */ + unsigned long a : 1; /* 5 */ + unsigned long d : 1; /* 6 */ + unsigned long pl : 2; /* 7-8 */ + unsigned long ar : 3; /* 9-11 */ + unsigned long ppn : 38; /* 12-49 */ + unsigned long rv2 : 2; /* 50-51 */ + unsigned long ed : 1; /* 52 */ + unsigned long ig1 : 11; /* 53-63 */ + }; + struct { + unsigned long __rv1 : 53; /* 0-52 */ + unsigned long contiguous : 1; /*53 */ + unsigned long tc : 1; /* 54 TR or TC */ + unsigned long cl : 1; + /* 55 I side or D side cache line */ + unsigned long len : 4; /* 56-59 */ + unsigned long io : 1; /* 60 entry is for io or not */ + unsigned long nomap : 1; + /* 61 entry cann't be inserted into machine TLB.*/ + unsigned long checked : 1; + /* 62 for VTLB/VHPT sanity check */ + unsigned long invalid : 1; + /* 63 invalid entry */ + }; + unsigned long page_flags; + }; /* same for VHPT and TLB */ + + union { + struct { + unsigned long rv3 : 2; + unsigned long ps : 6; + unsigned long key : 24; + unsigned long rv4 : 32; + }; + unsigned long itir; + }; + union { + struct { + unsigned long ig2 : 12; + unsigned long vpn : 49; + unsigned long vrn : 3; + }; + unsigned long ifa; + unsigned long vadr; + struct { + unsigned long tag : 63; + unsigned long ti : 1; + }; + unsigned long etag; + }; + union { + struct thash_data *next; + unsigned long rid; + unsigned long gpaddr; + }; +}; + +#define NITRS 8 +#define NDTRS 8 + +struct saved_vpd { + unsigned long vhpi; + unsigned long vgr[16]; + unsigned long vbgr[16]; + unsigned long vnat; + unsigned long vbnat; + unsigned long vcpuid[5]; + unsigned long vpsr; + unsigned long vpr; + unsigned long vcr[128]; +}; + +struct kvm_regs { + char *saved_guest; + char *saved_stack; + struct saved_vpd vpd; + /*Arch-regs*/ + int mp_state; + unsigned long vmm_rr; + /* TR and TC. */ + struct thash_data itrs[NITRS]; + struct thash_data dtrs[NDTRS]; + /* Bit is set if there is a tr/tc for the region. */ + unsigned char itr_regions; + unsigned char dtr_regions; + unsigned char tc_regions; + + char irq_check; + unsigned long saved_itc; + unsigned long itc_check; + unsigned long timer_check; + unsigned long timer_pending; + unsigned long last_itc; + + unsigned long vrr[8]; + unsigned long ibr[8]; + unsigned long dbr[8]; + unsigned long insvc[4]; /* Interrupt in service. 
*/ + unsigned long xtp; + + unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ + unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ + unsigned long metaphysical_saved_rr0; /* from kvm_arch */ + unsigned long metaphysical_saved_rr4; /* from kvm_arch */ + unsigned long fp_psr; /*used for lazy float register */ + unsigned long saved_gp; + /*for phycial emulation */ +}; + +struct kvm_sregs { +}; + +struct kvm_fpu { +}; #endif diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h new file mode 100644 index 00000000000..c082c208c1f --- /dev/null +++ b/include/asm-ia64/kvm_host.h @@ -0,0 +1,524 @@ +/* + * kvm_host.h: used for kvm module, and hold ia64-specific sections. + * + * Copyright (C) 2007, Intel Corporation. + * + * Xiantao Zhang <xiantao.zhang@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + */ + +#ifndef __ASM_KVM_HOST_H +#define __ASM_KVM_HOST_H + + +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/kvm.h> +#include <linux/kvm_para.h> +#include <linux/kvm_types.h> + +#include <asm/pal.h> +#include <asm/sal.h> + +#define KVM_MAX_VCPUS 4 +#define KVM_MEMORY_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + + +/* define exit reasons from vmm to kvm*/ +#define EXIT_REASON_VM_PANIC 0 +#define EXIT_REASON_MMIO_INSTRUCTION 1 +#define EXIT_REASON_PAL_CALL 2 +#define EXIT_REASON_SAL_CALL 3 +#define EXIT_REASON_SWITCH_RR6 4 +#define EXIT_REASON_VM_DESTROY 5 +#define EXIT_REASON_EXTERNAL_INTERRUPT 6 +#define EXIT_REASON_IPI 7 +#define EXIT_REASON_PTC_G 8 + +/*Define vmm address space and vm data space.*/ +#define KVM_VMM_SIZE (16UL<<20) +#define KVM_VMM_SHIFT 24 +#define KVM_VMM_BASE 0xD000000000000000UL +#define VMM_SIZE (8UL<<20) + +/* + * Define vm_buffer, used by PAL Services, base address. 
+ * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M + */ +#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) +#define KVM_VM_BUFFER_SIZE (8UL<<20) + +/*Define Virtual machine data layout.*/ +#define KVM_VM_DATA_SHIFT 24 +#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT) +#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE) + + +#define KVM_P2M_BASE KVM_VM_DATA_BASE +#define KVM_P2M_OFS 0 +#define KVM_P2M_SIZE (8UL << 20) + +#define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE) +#define KVM_VHPT_OFS KVM_P2M_SIZE +#define KVM_VHPT_BLOCK_SIZE (2UL << 20) +#define VHPT_SHIFT 18 +#define VHPT_SIZE (1UL << VHPT_SHIFT) +#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5)) + +#define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE) +#define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE) +#define KVM_VTLB_BLOCK_SIZE (1UL<<20) +#define VTLB_SHIFT 17 +#define VTLB_SIZE (1UL<<VTLB_SHIFT) +#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5)) + +#define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE) +#define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE) +#define KVM_VPD_BLOCK_SIZE (2UL<<20) +#define VPD_SHIFT 16 +#define VPD_SIZE (1UL<<VPD_SHIFT) + +#define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE) +#define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE) +#define KVM_VCPU_BLOCK_SIZE (2UL<<20) +#define VCPU_SHIFT 18 +#define VCPU_SIZE (1UL<<VCPU_SHIFT) +#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE + +#define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE) +#define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE) +#define KVM_VM_BLOCK_SIZE (1UL<<19) + +#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE) +#define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE) +#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19) + +/* Get vpd, vhpt, tlb, vcpu, base*/ +#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE) +#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE) +#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE) +#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE) + +/*IO section definitions*/ +#define IOREQ_READ 1 +#define IOREQ_WRITE 0 + +#define STATE_IOREQ_NONE 0 +#define STATE_IOREQ_READY 1 +#define STATE_IOREQ_INPROCESS 2 +#define STATE_IORESP_READY 3 + +/*Guest Physical address layout.*/ +#define GPFN_MEM (0UL << 60) /* Guest pfn is normal mem */ +#define GPFN_FRAME_BUFFER (1UL << 60) /* VGA framebuffer */ +#define GPFN_LOW_MMIO (2UL << 60) /* Low MMIO range */ +#define GPFN_PIB (3UL << 60) /* PIB base */ +#define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ +#define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ +#define GPFN_GFW (6UL << 60) /* Guest Firmware */ +#define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */ + +#define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ +#define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ +#define INVALID_MFN (~0UL) +#define MEM_G (1UL << 30) +#define MEM_M (1UL << 20) +#define MMIO_START (3 * MEM_G) +#define MMIO_SIZE (512 * MEM_M) +#define VGA_IO_START 0xA0000UL +#define VGA_IO_SIZE 0x20000 +#define LEGACY_IO_START (MMIO_START + MMIO_SIZE) +#define LEGACY_IO_SIZE (64 * MEM_M) +#define IO_SAPIC_START 0xfec00000UL +#define IO_SAPIC_SIZE 0x100000 +#define PIB_START 0xfee00000UL +#define PIB_SIZE 0x200000 +#define GFW_START (4 * MEM_G - 16 * MEM_M) +#define GFW_SIZE (16 * MEM_M) + +/*Deliver mode, defined for ioapic.c*/ +#define dest_Fixed IOSAPIC_FIXED +#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY + +#define NMI_VECTOR 2 +#define ExtINT_VECTOR 0 +#define NULL_VECTOR (-1) +#define IA64_SPURIOUS_INT_VECTOR 0x0f + +#define VCPU_LID(v) 
(((u64)(v)->vcpu_id) << 24) + +/* + *Delivery mode + */ +#define SAPIC_DELIV_SHIFT 8 +#define SAPIC_FIXED 0x0 +#define SAPIC_LOWEST_PRIORITY 0x1 +#define SAPIC_PMI 0x2 +#define SAPIC_NMI 0x4 +#define SAPIC_INIT 0x5 +#define SAPIC_EXTINT 0x7 + +/* + * vcpu->requests bit members for arch + */ +#define KVM_REQ_PTC_G 32 +#define KVM_REQ_RESUME 33 + +#define KVM_PAGES_PER_HPAGE 1 + +struct kvm; +struct kvm_vcpu; +struct kvm_guest_debug{ +}; + +struct kvm_mmio_req { + uint64_t addr; /* physical address */ + uint64_t size; /* size in bytes */ + uint64_t data; /* data (or paddr of data) */ + uint8_t state:4; + uint8_t dir:1; /* 1=read, 0=write */ +}; + +/*Pal data struct */ +struct kvm_pal_call{ + /*In area*/ + uint64_t gr28; + uint64_t gr29; + uint64_t gr30; + uint64_t gr31; + /*Out area*/ + struct ia64_pal_retval ret; +}; + +/* Sal data structure */ +struct kvm_sal_call{ + /*In area*/ + uint64_t in0; + uint64_t in1; + uint64_t in2; + uint64_t in3; + uint64_t in4; + uint64_t in5; + uint64_t in6; + uint64_t in7; + struct sal_ret_values ret; +}; + +/*Guest change rr6*/ +struct kvm_switch_rr6 { + uint64_t old_rr; + uint64_t new_rr; +}; + +union ia64_ipi_a{ + unsigned long val; + struct { + unsigned long rv : 3; + unsigned long ir : 1; + unsigned long eid : 8; + unsigned long id : 8; + unsigned long ib_base : 44; + }; +}; + +union ia64_ipi_d { + unsigned long val; + struct { + unsigned long vector : 8; + unsigned long dm : 3; + unsigned long ig : 53; + }; +}; + +/*ipi check exit data*/ +struct kvm_ipi_data{ + union ia64_ipi_a addr; + union ia64_ipi_d data; +}; + +/*global purge data*/ +struct kvm_ptc_g { + unsigned long vaddr; + unsigned long rr; + unsigned long ps; + struct kvm_vcpu *vcpu; +}; + +/*Exit control data */ +struct exit_ctl_data{ + uint32_t exit_reason; + uint32_t vm_status; + union { + struct kvm_mmio_req ioreq; + struct kvm_pal_call pal_data; + struct kvm_sal_call sal_data; + struct kvm_switch_rr6 rr_data; + struct kvm_ipi_data ipi_data; + struct kvm_ptc_g ptc_g_data; + } u; +}; + +union pte_flags { + unsigned long val; + struct { + unsigned long p : 1; /*0 */ + unsigned long : 1; /* 1 */ + unsigned long ma : 3; /* 2-4 */ + unsigned long a : 1; /* 5 */ + unsigned long d : 1; /* 6 */ + unsigned long pl : 2; /* 7-8 */ + unsigned long ar : 3; /* 9-11 */ + unsigned long ppn : 38; /* 12-49 */ + unsigned long : 2; /* 50-51 */ + unsigned long ed : 1; /* 52 */ + }; +}; + +union ia64_pta { + unsigned long val; + struct { + unsigned long ve : 1; + unsigned long reserved0 : 1; + unsigned long size : 6; + unsigned long vf : 1; + unsigned long reserved1 : 6; + unsigned long base : 49; + }; +}; + +struct thash_cb { + /* THASH base information */ + struct thash_data *hash; /* hash table pointer */ + union ia64_pta pta; + int num; +}; + +struct kvm_vcpu_stat { +}; + +struct kvm_vcpu_arch { + int launched; + int last_exit; + int last_run_cpu; + int vmm_tr_slot; + int vm_tr_slot; + +#define KVM_MP_STATE_RUNNABLE 0 +#define KVM_MP_STATE_UNINITIALIZED 1 +#define KVM_MP_STATE_INIT_RECEIVED 2 +#define KVM_MP_STATE_HALTED 3 + int mp_state; + +#define MAX_PTC_G_NUM 3 + int ptc_g_count; + struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM]; + + /*halt timer to wake up sleepy vcpus*/ + struct hrtimer hlt_timer; + long ht_active; + + struct kvm_lapic *apic; /* kernel irqchip context */ + struct vpd *vpd; + + /* Exit data for vmm_transition*/ + struct exit_ctl_data exit_data; + + cpumask_t cache_coherent_map; + + unsigned long vmm_rr; + unsigned long host_rr6; + unsigned long psbits[8]; + unsigned long cr_iipa; + unsigned 
long cr_isr; + unsigned long vsa_base; + unsigned long dirty_log_lock_pa; + unsigned long __gp; + /* TR and TC. */ + struct thash_data itrs[NITRS]; + struct thash_data dtrs[NDTRS]; + /* Bit is set if there is a tr/tc for the region. */ + unsigned char itr_regions; + unsigned char dtr_regions; + unsigned char tc_regions; + /* purge all */ + unsigned long ptce_base; + unsigned long ptce_count[2]; + unsigned long ptce_stride[2]; + /* itc/itm */ + unsigned long last_itc; + long itc_offset; + unsigned long itc_check; + unsigned long timer_check; + unsigned long timer_pending; + + unsigned long vrr[8]; + unsigned long ibr[8]; + unsigned long dbr[8]; + unsigned long insvc[4]; /* Interrupt in service. */ + unsigned long xtp; + + unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */ + unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */ + unsigned long metaphysical_saved_rr0; /* from kvm_arch */ + unsigned long metaphysical_saved_rr4; /* from kvm_arch */ + unsigned long fp_psr; /*used for lazy float register */ + unsigned long saved_gp; + /*for phycial emulation */ + int mode_flags; + struct thash_cb vtlb; + struct thash_cb vhpt; + char irq_check; + char irq_new_pending; + + unsigned long opcode; + unsigned long cause; + union context host; + union context guest; +}; + +struct kvm_vm_stat { + u64 remote_tlb_flush; +}; + +struct kvm_sal_data { + unsigned long boot_ip; + unsigned long boot_gp; +}; + +struct kvm_arch { + unsigned long vm_base; + unsigned long metaphysical_rr0; + unsigned long metaphysical_rr4; + unsigned long vmm_init_rr; + unsigned long vhpt_base; + unsigned long vtlb_base; + unsigned long vpd_base; + spinlock_t dirty_log_lock; + struct kvm_ioapic *vioapic; + struct kvm_vm_stat stat; + struct kvm_sal_data rdv_sal_data; +}; + +union cpuid3_t { + u64 value; + struct { + u64 number : 8; + u64 revision : 8; + u64 model : 8; + u64 family : 8; + u64 archrev : 8; + u64 rv : 24; + }; +}; + +struct kvm_pt_regs { + /* The following registers are saved by SAVE_MIN: */ + unsigned long b6; /* scratch */ + unsigned long b7; /* scratch */ + + unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ + unsigned long ar_ssd; /* reserved for future use (scratch) */ + + unsigned long r8; /* scratch (return value register 0) */ + unsigned long r9; /* scratch (return value register 1) */ + unsigned long r10; /* scratch (return value register 2) */ + unsigned long r11; /* scratch (return value register 3) */ + + unsigned long cr_ipsr; /* interrupted task's psr */ + unsigned long cr_iip; /* interrupted task's instruction pointer */ + unsigned long cr_ifs; /* interrupted task's function state */ + + unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ + unsigned long ar_pfs; /* prev function state */ + unsigned long ar_rsc; /* RSE configuration */ + /* The following two are valid only if cr_ipsr.cpl > 0: */ + unsigned long ar_rnat; /* RSE NaT */ + unsigned long ar_bspstore; /* RSE bspstore */ + + unsigned long pr; /* 64 predicate registers (1 bit each) */ + unsigned long b0; /* return pointer (bp) */ + unsigned long loadrs; /* size of dirty partition << 16 */ + + unsigned long r1; /* the gp pointer */ + unsigned long r12; /* interrupted task's memory stack pointer */ + unsigned long r13; /* thread pointer */ + + unsigned long ar_fpsr; /* floating point status (preserved) */ + unsigned long r15; /* scratch */ + + /* The remaining registers are NOT saved for system calls. 
*/ + unsigned long r14; /* scratch */ + unsigned long r2; /* scratch */ + unsigned long r3; /* scratch */ + unsigned long r16; /* scratch */ + unsigned long r17; /* scratch */ + unsigned long r18; /* scratch */ + unsigned long r19; /* scratch */ + unsigned long r20; /* scratch */ + unsigned long r21; /* scratch */ + unsigned long r22; /* scratch */ + unsigned long r23; /* scratch */ + unsigned long r24; /* scratch */ + unsigned long r25; /* scratch */ + unsigned long r26; /* scratch */ + unsigned long r27; /* scratch */ + unsigned long r28; /* scratch */ + unsigned long r29; /* scratch */ + unsigned long r30; /* scratch */ + unsigned long r31; /* scratch */ + unsigned long ar_ccv; /* compare/exchange value (scratch) */ + + /* + * Floating point registers that the kernel considers scratch: + */ + struct ia64_fpreg f6; /* scratch */ + struct ia64_fpreg f7; /* scratch */ + struct ia64_fpreg f8; /* scratch */ + struct ia64_fpreg f9; /* scratch */ + struct ia64_fpreg f10; /* scratch */ + struct ia64_fpreg f11; /* scratch */ + + unsigned long r4; /* preserved */ + unsigned long r5; /* preserved */ + unsigned long r6; /* preserved */ + unsigned long r7; /* preserved */ + unsigned long eml_unat; /* used for emulating instruction */ + unsigned long pad0; /* alignment pad */ +}; + +static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) +{ + return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1; +} + +typedef int kvm_vmm_entry(void); +typedef void kvm_tramp_entry(union context *host, union context *guest); + +struct kvm_vmm_info{ + struct module *module; + kvm_vmm_entry *vmm_entry; + kvm_tramp_entry *tramp_entry; + unsigned long vmm_ivt; +}; + +int kvm_highest_pending_irq(struct kvm_vcpu *vcpu); +int kvm_emulate_halt(struct kvm_vcpu *vcpu); +int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); +void kvm_sal_emul(struct kvm_vcpu *vcpu); + +#endif diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h new file mode 100644 index 00000000000..9f9796bb344 --- /dev/null +++ b/include/asm-ia64/kvm_para.h @@ -0,0 +1,29 @@ +#ifndef __IA64_KVM_PARA_H +#define __IA64_KVM_PARA_H + +/* + * asm-ia64/kvm_para.h + * + * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
+ * + */ + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +#endif diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index c201a2020aa..9f020eb825c 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -22,6 +22,7 @@ struct pci_bus; struct task_struct; struct pci_dev; struct msi_desc; +struct dma_attrs; typedef void ia64_mv_setup_t (char **); typedef void ia64_mv_cpu_init_t (void); @@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); typedef int ia64_mv_dma_supported (struct device *, u64); +typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); +typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); +typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); +typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); + /* * WARNING: The legacy I/O space is _architected_. Platforms are * expected to follow this architected model (see Section 10.7 in the @@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # define platform_dma_init ia64_mv.dma_init # define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent # define platform_dma_free_coherent ia64_mv.dma_free_coherent -# define platform_dma_map_single ia64_mv.dma_map_single -# define platform_dma_unmap_single ia64_mv.dma_unmap_single -# define platform_dma_map_sg ia64_mv.dma_map_sg -# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg +# define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs +# define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs +# define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs +# define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs # define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu # define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu # define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device @@ -190,10 +196,10 @@ struct ia64_machine_vector { ia64_mv_dma_init *dma_init; ia64_mv_dma_alloc_coherent *dma_alloc_coherent; ia64_mv_dma_free_coherent *dma_free_coherent; - ia64_mv_dma_map_single *dma_map_single; - ia64_mv_dma_unmap_single *dma_unmap_single; - ia64_mv_dma_map_sg *dma_map_sg; - ia64_mv_dma_unmap_sg *dma_unmap_sg; + ia64_mv_dma_map_single_attrs *dma_map_single_attrs; + ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs; + ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs; + ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs; ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu; ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu; ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device; @@ -240,10 +246,10 @@ struct ia64_machine_vector { platform_dma_init, \ platform_dma_alloc_coherent, \ platform_dma_free_coherent, \ - platform_dma_map_single, \ - platform_dma_unmap_single, \ - platform_dma_map_sg, \ - platform_dma_unmap_sg, \ + platform_dma_map_single_attrs, \ + platform_dma_unmap_single_attrs, \ + platform_dma_map_sg_attrs, \ + platform_dma_unmap_sg_attrs, \ platform_dma_sync_single_for_cpu, \ platform_dma_sync_sg_for_cpu, \ platform_dma_sync_single_for_device, \ @@ -292,9 +298,13 @@ extern ia64_mv_dma_init swiotlb_init; extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; extern 
ia64_mv_dma_free_coherent swiotlb_free_coherent; extern ia64_mv_dma_map_single swiotlb_map_single; +extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs; extern ia64_mv_dma_unmap_single swiotlb_unmap_single; +extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs; extern ia64_mv_dma_map_sg swiotlb_map_sg; +extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs; extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; +extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs; extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu; extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu; extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device; @@ -340,17 +350,17 @@ extern ia64_mv_dma_supported swiotlb_dma_supported; #ifndef platform_dma_free_coherent # define platform_dma_free_coherent swiotlb_free_coherent #endif -#ifndef platform_dma_map_single -# define platform_dma_map_single swiotlb_map_single +#ifndef platform_dma_map_single_attrs +# define platform_dma_map_single_attrs swiotlb_map_single_attrs #endif -#ifndef platform_dma_unmap_single -# define platform_dma_unmap_single swiotlb_unmap_single +#ifndef platform_dma_unmap_single_attrs +# define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs #endif -#ifndef platform_dma_map_sg -# define platform_dma_map_sg swiotlb_map_sg +#ifndef platform_dma_map_sg_attrs +# define platform_dma_map_sg_attrs swiotlb_map_sg_attrs #endif -#ifndef platform_dma_unmap_sg -# define platform_dma_unmap_sg swiotlb_unmap_sg +#ifndef platform_dma_unmap_sg_attrs +# define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs #endif #ifndef platform_dma_sync_single_for_cpu # define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h index e90daf9ce34..2f57f5144b9 100644 --- a/include/asm-ia64/machvec_hpzx1.h +++ b/include/asm-ia64/machvec_hpzx1.h @@ -4,10 +4,10 @@ extern ia64_mv_setup_t dig_setup; extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; extern ia64_mv_dma_free_coherent sba_free_coherent; -extern ia64_mv_dma_map_single sba_map_single; -extern ia64_mv_dma_unmap_single sba_unmap_single; -extern ia64_mv_dma_map_sg sba_map_sg; -extern ia64_mv_dma_unmap_sg sba_unmap_sg; +extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; extern ia64_mv_dma_supported sba_dma_supported; extern ia64_mv_dma_mapping_error sba_dma_mapping_error; @@ -23,10 +23,10 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error; #define platform_dma_init machvec_noop #define platform_dma_alloc_coherent sba_alloc_coherent #define platform_dma_free_coherent sba_free_coherent -#define platform_dma_map_single sba_map_single -#define platform_dma_unmap_single sba_unmap_single -#define platform_dma_map_sg sba_map_sg -#define platform_dma_unmap_sg sba_unmap_sg +#define platform_dma_map_single_attrs sba_map_single_attrs +#define platform_dma_unmap_single_attrs sba_unmap_single_attrs +#define platform_dma_map_sg_attrs sba_map_sg_attrs +#define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs #define platform_dma_sync_single_for_cpu machvec_dma_sync_single #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg #define platform_dma_sync_single_for_device machvec_dma_sync_single diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h index 
f00a34a148f..a842cdda827 100644 --- a/include/asm-ia64/machvec_hpzx1_swiotlb.h +++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h @@ -4,10 +4,10 @@ extern ia64_mv_setup_t dig_setup; extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; extern ia64_mv_dma_free_coherent hwsw_free_coherent; -extern ia64_mv_dma_map_single hwsw_map_single; -extern ia64_mv_dma_unmap_single hwsw_unmap_single; -extern ia64_mv_dma_map_sg hwsw_map_sg; -extern ia64_mv_dma_unmap_sg hwsw_unmap_sg; +extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs; extern ia64_mv_dma_supported hwsw_dma_supported; extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; @@ -28,10 +28,10 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; #define platform_dma_init machvec_noop #define platform_dma_alloc_coherent hwsw_alloc_coherent #define platform_dma_free_coherent hwsw_free_coherent -#define platform_dma_map_single hwsw_map_single -#define platform_dma_unmap_single hwsw_unmap_single -#define platform_dma_map_sg hwsw_map_sg -#define platform_dma_unmap_sg hwsw_unmap_sg +#define platform_dma_map_single_attrs hwsw_map_single_attrs +#define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs +#define platform_dma_map_sg_attrs hwsw_map_sg_attrs +#define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs #define platform_dma_supported hwsw_dma_supported #define platform_dma_mapping_error hwsw_dma_mapping_error #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index 61439a7f5b0..781308ea7b8 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h @@ -57,10 +57,10 @@ extern ia64_mv_readl_t __sn_readl_relaxed; extern ia64_mv_readq_t __sn_readq_relaxed; extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; extern ia64_mv_dma_free_coherent sn_dma_free_coherent; -extern ia64_mv_dma_map_single sn_dma_map_single; -extern ia64_mv_dma_unmap_single sn_dma_unmap_single; -extern ia64_mv_dma_map_sg sn_dma_map_sg; -extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg; +extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; +extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; +extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; +extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; @@ -113,10 +113,10 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; #define platform_dma_init machvec_noop #define platform_dma_alloc_coherent sn_dma_alloc_coherent #define platform_dma_free_coherent sn_dma_free_coherent -#define platform_dma_map_single sn_dma_map_single -#define platform_dma_unmap_single sn_dma_unmap_single -#define platform_dma_map_sg sn_dma_map_sg -#define platform_dma_unmap_sg sn_dma_unmap_sg +#define platform_dma_map_single_attrs sn_dma_map_single_attrs +#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs +#define platform_dma_map_sg_attrs sn_dma_map_sg_attrs +#define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu #define 
platform_dma_sync_single_for_device sn_dma_sync_single_for_device diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 4999a6c6377..36f39321b76 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -54,9 +54,6 @@ # define HPAGE_MASK (~(HPAGE_SIZE - 1)) # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -# define ARCH_HAS_HUGEPAGE_ONLY_RANGE -# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE -# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE #endif /* CONFIG_HUGETLB_PAGE */ #ifdef __ASSEMBLY__ @@ -153,9 +150,6 @@ typedef union ia64_va { # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -# define is_hugepage_only_range(mm, addr, len) \ - (REGION_NUMBER(addr) == RGN_HPAGE || \ - REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE) extern unsigned int hpage_shift; #endif diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index ed70862ea24..7a9bff47564 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -302,6 +302,8 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0) #define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0) #define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0) +#define pte_special(pte) 0 + /* * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the * access rights: @@ -313,6 +315,7 @@ ia64_phys_addr_valid (unsigned long addr) #define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D)) #define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D)) #define pte_mkhuge(pte) (__pte(pte_val(pte))) +#define pte_mkspecial(pte) (pte) /* * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 741f7ecb986..6aff126fc07 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -119,6 +119,69 @@ struct ia64_psr { __u64 reserved4 : 19; }; +union ia64_isr { + __u64 val; + struct { + __u64 code : 16; + __u64 vector : 8; + __u64 reserved1 : 8; + __u64 x : 1; + __u64 w : 1; + __u64 r : 1; + __u64 na : 1; + __u64 sp : 1; + __u64 rs : 1; + __u64 ir : 1; + __u64 ni : 1; + __u64 so : 1; + __u64 ei : 2; + __u64 ed : 1; + __u64 reserved2 : 20; + }; +}; + +union ia64_lid { + __u64 val; + struct { + __u64 rv : 16; + __u64 eid : 8; + __u64 id : 8; + __u64 ig : 32; + }; +}; + +union ia64_tpr { + __u64 val; + struct { + __u64 ig0 : 4; + __u64 mic : 4; + __u64 rsv : 8; + __u64 mmi : 1; + __u64 ig1 : 47; + }; +}; + +union ia64_itir { + __u64 val; + struct { + __u64 rv3 : 2; /* 0-1 */ + __u64 ps : 6; /* 2-7 */ + __u64 key : 24; /* 8-31 */ + __u64 rv4 : 32; /* 32-63 */ + }; +}; + +union ia64_rr { + __u64 val; + struct { + __u64 ve : 1; /* enable hw walker */ + __u64 reserved0: 1; /* reserved */ + __u64 ps : 6; /* log page size */ + __u64 rid : 24; /* region id */ + __u64 reserved1: 32; /* reserved */ + }; +}; + /* * CPU type, hardware bug flags, and per-CPU state. 
Frequently used * state comes earlier: diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index dff8128fa58..26e250bfb91 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -146,23 +146,23 @@ do { \ # define local_irq_save(x) \ do { \ - unsigned long psr; \ + unsigned long __psr; \ \ - __local_irq_save(psr); \ - if (psr & IA64_PSR_I) \ + __local_irq_save(__psr); \ + if (__psr & IA64_PSR_I) \ __save_ip(); \ - (x) = psr; \ + (x) = __psr; \ } while (0) -# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0) +# define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0) # define local_irq_restore(x) \ do { \ - unsigned long old_psr, psr = (x); \ + unsigned long __old_psr, __psr = (x); \ \ - local_save_flags(old_psr); \ - __local_irq_restore(psr); \ - if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \ + local_save_flags(__old_psr); \ + __local_irq_restore(__psr); \ + if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I)) \ __save_ip(); \ } while (0) diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h index bb855988810..7bddc7f5858 100644 --- a/include/asm-ia64/unaligned.h +++ b/include/asm-ia64/unaligned.h @@ -1,6 +1,11 @@ #ifndef _ASM_IA64_UNALIGNED_H #define _ASM_IA64_UNALIGNED_H -#include <asm-generic/unaligned.h> +#include <linux/unaligned/le_struct.h> +#include <linux/unaligned/be_byteshift.h> +#include <linux/unaligned/generic.h> + +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le #endif /* _ASM_IA64_UNALIGNED_H */ diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h index 86505387be0..e6359c566b5 100644 --- a/include/asm-m32r/pgtable.h +++ b/include/asm-m32r/pgtable.h @@ -214,6 +214,11 @@ static inline int pte_file(pte_t pte) return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) +{ + return 0; +} + static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; @@ -250,6 +255,11 @@ static inline pte_t pte_mkwrite(pte_t pte) return pte; } +static inline pte_t pte_mkspecial(pte_t pte) +{ + return pte; +} + static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); diff --git a/include/asm-m32r/unaligned.h b/include/asm-m32r/unaligned.h index fccc180c391..377eb20d1ec 100644 --- a/include/asm-m32r/unaligned.h +++ b/include/asm-m32r/unaligned.h @@ -1,19 +1,18 @@ #ifndef _ASM_M32R_UNALIGNED_H #define _ASM_M32R_UNALIGNED_H -/* - * For the benefit of those who are trying to port Linux to another - * architecture, here are some C-language equivalents. 
- */ - -#include <asm/string.h> - -#define get_unaligned(ptr) \ - ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) - -#define put_unaligned(val, ptr) \ - ({ __typeof__(*(ptr)) __tmp = (val); \ - memmove((ptr), &__tmp, sizeof(*(ptr))); \ - (void)0; }) +#if defined(__LITTLE_ENDIAN__) +# include <linux/unaligned/le_memmove.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# include <linux/unaligned/be_memmove.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif #endif /* _ASM_M32R_UNALIGNED_H */ diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h index 13135d4821d..8e9a8a754dd 100644 --- a/include/asm-m68k/motorola_pgtable.h +++ b/include/asm-m68k/motorola_pgtable.h @@ -168,6 +168,7 @@ static inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_RONLY); static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } @@ -185,6 +186,7 @@ static inline pte_t pte_mkcache(pte_t pte) pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h index b766fc261bd..f847ec732d6 100644 --- a/include/asm-m68k/sun3_pgtable.h +++ b/include/asm-m68k/sun3_pgtable.h @@ -169,6 +169,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & SUN3_PAGE_WRITEA static inline int pte_dirty(pte_t pte) { return pte_val(pte) & SUN3_PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; } @@ -181,6 +182,7 @@ static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE //static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; } // until then, use: static inline pte_t pte_mkcache(pte_t pte) { return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t kernel_pg_dir[PTRS_PER_PGD]; diff --git a/include/asm-m68k/unaligned.h b/include/asm-m68k/unaligned.h index 804cb3f888f..77698f2dc33 100644 --- a/include/asm-m68k/unaligned.h +++ b/include/asm-m68k/unaligned.h @@ -1,16 +1,13 @@ -#ifndef __M68K_UNALIGNED_H -#define __M68K_UNALIGNED_H +#ifndef _ASM_M68K_UNALIGNED_H +#define _ASM_M68K_UNALIGNED_H /* * The m68k can do unaligned accesses itself. 
- * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#define get_unaligned(ptr) (*(ptr)) +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) - -#endif +#endif /* _ASM_M68K_UNALIGNED_H */ diff --git a/include/asm-m68knommu/unaligned.h b/include/asm-m68knommu/unaligned.h index 869e9dd24f5..eb1ea4cb9a5 100644 --- a/include/asm-m68knommu/unaligned.h +++ b/include/asm-m68knommu/unaligned.h @@ -1,23 +1,25 @@ -#ifndef __M68K_UNALIGNED_H -#define __M68K_UNALIGNED_H +#ifndef _ASM_M68KNOMMU_UNALIGNED_H +#define _ASM_M68KNOMMU_UNALIGNED_H #ifdef CONFIG_COLDFIRE +#include <linux/unaligned/be_struct.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> -#include <asm-generic/unaligned.h> +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #else /* * The m68k can do unaligned accesses itself. - * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#define get_unaligned(ptr) (*(ptr)) -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #endif -#endif +#endif /* _ASM_M68KNOMMU_UNALIGNED_H */ diff --git a/include/asm-mips/cmp.h b/include/asm-mips/cmp.h new file mode 100644 index 00000000000..89a73fb93ae --- /dev/null +++ b/include/asm-mips/cmp.h @@ -0,0 +1,18 @@ +#ifndef _ASM_CMP_H +#define _ASM_CMP_H + +/* + * Definitions for CMP multitasking on MIPS cores + */ +struct task_struct; + +extern void cmp_smp_setup(void); +extern void cmp_smp_finish(void); +extern void cmp_boot_secondary(int cpu, struct task_struct *t); +extern void cmp_init_secondary(void); +extern void cmp_cpus_done(void); +extern void cmp_prepare_cpus(unsigned int max_cpus); + +/* This is platform specific */ +extern void cmp_send_ipi(int cpu, unsigned int action); +#endif /* _ASM_CMP_H */ diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h index bf5bbc78a9f..1c35cac6f35 100644 --- a/include/asm-mips/cpu.h +++ b/include/asm-mips/cpu.h @@ -29,7 +29,7 @@ #define PRID_COMP_ALCHEMY 0x030000 #define PRID_COMP_SIBYTE 0x040000 #define PRID_COMP_SANDCRAFT 0x050000 -#define PRID_COMP_PHILIPS 0x060000 +#define PRID_COMP_NXP 0x060000 #define PRID_COMP_TOSHIBA 0x070000 #define PRID_COMP_LSI 0x080000 #define PRID_COMP_LEXRA 0x0b0000 @@ -89,6 +89,7 @@ #define PRID_IMP_34K 0x9500 #define PRID_IMP_24KE 0x9600 #define PRID_IMP_74K 0x9700 +#define PRID_IMP_1004K 0x9900 #define PRID_IMP_LOONGSON1 0x4200 #define PRID_IMP_LOONGSON2 0x6300 @@ -194,9 +195,9 @@ enum cpu_type_enum { /* * MIPS32 class processors */ - CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000, - CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550, - CPU_PR4450, CPU_BCM3302, CPU_BCM4710, + CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K, + CPU_AU1000, CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, + CPU_AU1550, CPU_PR4450, CPU_BCM3302, CPU_BCM4710, /* * MIPS64 class processors diff --git a/include/asm-mips/dec/ioasic.h b/include/asm-mips/dec/ioasic.h index 
486a5b0a130..98badd6bf22 100644 --- a/include/asm-mips/dec/ioasic.h +++ b/include/asm-mips/dec/ioasic.h @@ -33,4 +33,6 @@ static inline u32 ioasic_read(unsigned int reg) extern void init_ioasic_irqs(int base); +extern void dec_ioasic_clocksource_init(void); + #endif /* __ASM_DEC_IOASIC_H */ diff --git a/include/asm-mips/ds1287.h b/include/asm-mips/ds1287.h new file mode 100644 index 00000000000..ba1702e8693 --- /dev/null +++ b/include/asm-mips/ds1287.h @@ -0,0 +1,27 @@ +/* + * DS1287 timer functions. + * + * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __ASM_DS1287_H +#define __ASM_DS1287_H + +extern int ds1287_timer_state(void); +extern void ds1287_set_base_clock(unsigned int clock); +extern int ds1287_clockevent_init(int irq); + +#endif diff --git a/include/asm-mips/gcmpregs.h b/include/asm-mips/gcmpregs.h new file mode 100644 index 00000000000..d74a8a4ca86 --- /dev/null +++ b/include/asm-mips/gcmpregs.h @@ -0,0 +1,117 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. 
+ * + * Multiprocessor Subsystem Register Definitions + * + */ +#ifndef _ASM_GCMPREGS_H +#define _ASM_GCMPREGS_H + + +/* Offsets to major blocks within GCMP from GCMP base */ +#define GCMP_GCB_OFS 0x0000 /* Global Control Block */ +#define GCMP_CLCB_OFS 0x2000 /* Core Local Control Block */ +#define GCMP_COCB_OFS 0x4000 /* Core Other Control Block */ +#define GCMP_GDB_OFS 0x8000 /* Global Debug Block */ + +/* Offsets to individual GCMP registers from GCMP base */ +#define GCMPOFS(block, tag, reg) (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS) + +#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg) +#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg) +#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg) +#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg) + +/* GCMP register access */ +#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg)) +#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg)) +#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg)) +#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg)) + +/* Mask generation */ +#define GCMPMSK(block, reg, bits) (MSK(bits)<<GCMP_##block##_##reg##_SHF) +#define GCMPGCBMSK(reg, bits) GCMPMSK(GCB, reg, bits) +#define GCMPCCBMSK(reg, bits) GCMPMSK(CCB, reg, bits) +#define GCMPGDBMSK(reg, bits) GCMPMSK(GDB, reg, bits) + +/* GCB registers */ +#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */ +#define GCMP_GCB_GC_NUMIOCU_SHF 8 +#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4) +#define GCMP_GCB_GC_NUMCORES_SHF 0 +#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8) +#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */ +#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15 +#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17) +#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0 +#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2) +#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 0 +#define GCMP_GCB_GCMPB_CMDEFTGT_MEM1 1 +#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2 +#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3 +#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */ +#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */ +#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0 +#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8) +#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */ +#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */ +#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */ +#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27 +#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5) +#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0 +#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27) +#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */ +#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */ +#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0 +#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5) +#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */ +#define GCMP_GCB_GICBA_BASE_SHF 17 +#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15) +#define GCMP_GCB_GICBA_EN_SHF 0 +#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1) + +/* GCB Regions */ +#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */ +#define GCMP_GCB_CMxBASE_BASE_SHF 16 +#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16) +#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */ +#define GCMP_GCB_CMxMASK_MASK_SHF 16 +#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16) +#define 
GCMP_GCB_CMxMASK_CMREGTGT_SHF 0 +#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2) +#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0 +#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1 +#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2 +#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3 + + +/* Core local/Core other control block registers */ +#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */ +#define GCMP_CCB_RESETR_INRESET_SHF 0 +#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16) +#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */ +#define GCMP_CCB_COHCTL_DOMAIN_SHF 0 +#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8) +#define GCMP_CCB_CFG_OFS 0x0010 /* Config */ +#define GCMP_CCB_CFG_IOCUTYPE_SHF 10 +#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2) +#define GCMP_CCB_CFG_IOCUTYPE_CPU 0 +#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1 +#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2 +#define GCMP_CCB_CFG_NUMVPE_SHF 0 +#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10) +#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */ +#define GCMP_CCB_OTHER_CORENUM_SHF 16 +#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16) +#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */ +#define GCMP_CCB_RESETBASE_BEV_SHF 12 +#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20) +#define GCMP_CCB_ID_OFS 0x0028 /* Identification */ +#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */ +#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */ + +#endif /* _ASM_GCMPREGS_H */ diff --git a/include/asm-mips/gic.h b/include/asm-mips/gic.h new file mode 100644 index 00000000000..01b2f92dc33 --- /dev/null +++ b/include/asm-mips/gic.h @@ -0,0 +1,487 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. 
+ * + * GIC Register Definitions + * + */ +#ifndef _ASM_GICREGS_H +#define _ASM_GICREGS_H + +#undef GICISBYTELITTLEENDIAN +#define GICISWORDLITTLEENDIAN + +/* Constants */ +#define GIC_POL_POS 1 +#define GIC_POL_NEG 0 +#define GIC_TRIG_EDGE 1 +#define GIC_TRIG_LEVEL 0 + +#define GIC_NUM_INTRS 32 + +#define MSK(n) ((1 << (n)) - 1) +#define REG32(addr) (*(volatile unsigned int *) (addr)) +#define REG(base, offs) REG32((unsigned int)(base) + offs##_##OFS) +#define REGP(base, phys) REG32((unsigned int)(base) + (phys)) + +/* Accessors */ +#define GIC_REG(segment, offset) \ + REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS) +#define GIC_REG_ADDR(segment, offset) \ + REG32(_gic_base + segment##_##SECTION_OFS + offset) + +#define GIC_ABS_REG(segment, offset) \ + (_gic_base + segment##_##SECTION_OFS + offset##_##OFS) +#define GIC_REG_ABS_ADDR(segment, offset) \ + (_gic_base + segment##_##SECTION_OFS + offset) + +#ifdef GICISBYTELITTLEENDIAN +#define GICREAD(reg, data) (data) = (reg), (data) = le32_to_cpu(data) +#define GICWRITE(reg, data) (reg) = cpu_to_le32(data) +#define GICBIS(reg, bits) \ + ({unsigned int data; \ + GICREAD(reg, data); \ + data |= bits; \ + GICWRITE(reg, data); \ + }) + +#else +#define GICREAD(reg, data) (data) = (reg) +#define GICWRITE(reg, data) (reg) = (data) +#define GICBIS(reg, bits) (reg) |= (bits) +#endif + + +/* GIC Address Space */ +#define SHARED_SECTION_OFS 0x0000 +#define SHARED_SECTION_SIZE 0x8000 +#define VPE_LOCAL_SECTION_OFS 0x8000 +#define VPE_LOCAL_SECTION_SIZE 0x4000 +#define VPE_OTHER_SECTION_OFS 0xc000 +#define VPE_OTHER_SECTION_SIZE 0x4000 +#define USM_VISIBLE_SECTION_OFS 0x10000 +#define USM_VISIBLE_SECTION_SIZE 0x10000 + +/* Register Map for Shared Section */ +#if defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(GICISWORDLITTLEENDIAN) + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ +#define GIC_SH_COUNTER_31_00_OFS 0x0010 +#define GIC_SH_COUNTER_63_32_OFS 0x0014 + +/* Interrupt Polarity */ +#define GIC_SH_POL_31_0_OFS 0x0100 +#define GIC_SH_POL_63_32_OFS 0x0104 +#define GIC_SH_POL_95_64_OFS 0x0108 +#define GIC_SH_POL_127_96_OFS 0x010c +#define GIC_SH_POL_159_128_OFS 0x0110 +#define GIC_SH_POL_191_160_OFS 0x0114 +#define GIC_SH_POL_223_192_OFS 0x0118 +#define GIC_SH_POL_255_224_OFS 0x011c + +/* Edge/Level Triggering */ +#define GIC_SH_TRIG_31_0_OFS 0x0180 +#define GIC_SH_TRIG_63_32_OFS 0x0184 +#define GIC_SH_TRIG_95_64_OFS 0x0188 +#define GIC_SH_TRIG_127_96_OFS 0x018c +#define GIC_SH_TRIG_159_128_OFS 0x0190 +#define GIC_SH_TRIG_191_160_OFS 0x0194 +#define GIC_SH_TRIG_223_192_OFS 0x0198 +#define GIC_SH_TRIG_255_224_OFS 0x019c + +/* Dual Edge Triggering */ +#define GIC_SH_DUAL_31_0_OFS 0x0200 +#define GIC_SH_DUAL_63_32_OFS 0x0204 +#define GIC_SH_DUAL_95_64_OFS 0x0208 +#define GIC_SH_DUAL_127_96_OFS 0x020c +#define GIC_SH_DUAL_159_128_OFS 0x0210 +#define GIC_SH_DUAL_191_160_OFS 0x0214 +#define GIC_SH_DUAL_223_192_OFS 0x0218 +#define GIC_SH_DUAL_255_224_OFS 0x021c + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Reset Mask - Disables Interrupt */ +#define GIC_SH_RMASK_31_0_OFS 0x0300 +#define GIC_SH_RMASK_63_32_OFS 0x0304 +#define GIC_SH_RMASK_95_64_OFS 0x0308 +#define GIC_SH_RMASK_127_96_OFS 0x030c +#define GIC_SH_RMASK_159_128_OFS 0x0310 +#define GIC_SH_RMASK_191_160_OFS 0x0314 +#define GIC_SH_RMASK_223_192_OFS 0x0318 +#define GIC_SH_RMASK_255_224_OFS 0x031c + +/* Set Mask (WO) - Enables Interrupt */ +#define GIC_SH_SMASK_31_0_OFS 0x0380 +#define GIC_SH_SMASK_63_32_OFS 0x0384 
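For illustration of how the SMASK/RMASK banks in this map are meant to be driven: each 32-bit word covers 32 external interrupt sources, so enabling interrupt N amounts to writing bit (N % 32) into set-mask word (N / 32), and disabling it means writing the same bit into the matching reset-mask word. A minimal little-endian sketch follows, using example-local names only (the ex_ prefixes and gic_write32() are placeholders, not taken from the header); the MMIO write mirrors the REG32() volatile-pointer access defined above.

    #include <stdint.h>

    #define EX_GIC_SH_SMASK_BASE 0x0380   /* first set-mask word, as mapped above */
    #define EX_GIC_SH_RMASK_BASE 0x0300   /* first reset-mask word */

    /* Placeholder 32-bit MMIO write, equivalent to the REG32() accessor. */
    static void gic_write32(uintptr_t addr, uint32_t val)
    {
            *(volatile uint32_t *)addr = val;
    }

    static void ex_gic_enable_intr(uintptr_t gic_base, unsigned int intr)
    {
            /* SMASK is write-one-to-set: untouched bits keep their state. */
            gic_write32(gic_base + EX_GIC_SH_SMASK_BASE + (intr / 32) * 4,
                        1u << (intr % 32));
    }

    static void ex_gic_disable_intr(uintptr_t gic_base, unsigned int intr)
    {
            /* RMASK is write-one-to-clear. */
            gic_write32(gic_base + EX_GIC_SH_RMASK_BASE + (intr / 32) * 4,
                        1u << (intr % 32));
    }

This is the same arithmetic the GIC_SET_INTR_MASK()/GIC_CLR_INTR_MASK() macros perform in the little-endian layout; the big-endian register map further below has to transpose adjacent words, which is why its offsets and macro bodies differ.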
+#define GIC_SH_SMASK_95_64_OFS 0x0388 +#define GIC_SH_SMASK_127_96_OFS 0x038c +#define GIC_SH_SMASK_159_128_OFS 0x0390 +#define GIC_SH_SMASK_191_160_OFS 0x0394 +#define GIC_SH_SMASK_223_192_OFS 0x0398 +#define GIC_SH_SMASK_255_224_OFS 0x039c + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_31_0_OFS 0x0400 +#define GIC_SH_MASK_63_32_OFS 0x0404 +#define GIC_SH_MASK_95_64_OFS 0x0408 +#define GIC_SH_MASK_127_96_OFS 0x040c +#define GIC_SH_MASK_159_128_OFS 0x0410 +#define GIC_SH_MASK_191_160_OFS 0x0414 +#define GIC_SH_MASK_223_192_OFS 0x0418 +#define GIC_SH_MASK_255_224_OFS 0x041c + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_31_0_OFS 0x0480 +#define GIC_SH_PEND_63_32_OFS 0x0484 +#define GIC_SH_PEND_95_64_OFS 0x0488 +#define GIC_SH_PEND_127_96_OFS 0x048c +#define GIC_SH_PEND_159_128_OFS 0x0490 +#define GIC_SH_PEND_191_160_OFS 0x0494 +#define GIC_SH_PEND_223_192_OFS 0x0498 +#define GIC_SH_PEND_255_224_OFS 0x049c + +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_MAP_TO_PIN(intr) \ + (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr)) + +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 + +/* Maps Interrupt X to a VPE */ +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Polarity : Reset Value is always 0 */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 +#define GIC_SET_POLARITY(intr, pol) \ + GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + (((intr) / 32) * 4)), (pol) << ((intr) % 32)) + +/* Triggering : Reset Value is always 0 */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 +#define GIC_SET_TRIGGER(intr, trig) \ + GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + (((intr) / 32) * 4)), (trig) << ((intr) % 32)) + +/* Mask manipulation */ +#define GIC_SH_SMASK_OFS 0x0380 +#define GIC_SET_INTR_MASK(intr, val) \ + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32))) + +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_CLR_INTR_MASK(intr, val) \ + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32))) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a0 +#define GIC_VPE_COMPARE_HI 0x00a4 + +#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 +#define GIC_VPE_EIC_SS(intr) \ + (GIC_EIC_SHADOW_SET_BASE + (4 * intr)) + +#define GIC_VPE_EIC_VEC_BASE 0x0800 +#define GIC_VPE_EIC_VEC(intr) \ + (GIC_VPE_EIC_VEC_BASE + (4 * intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 + +#else /* CONFIG_CPU_BIG_ENDIAN */ + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ 
+#define GIC_SH_COUNTER_31_00_OFS 0x0014 +#define GIC_SH_COUNTER_63_32_OFS 0x0010 + +/* Interrupt Polarity */ +#define GIC_SH_POL_31_0_OFS 0x0104 +#define GIC_SH_POL_63_32_OFS 0x0100 +#define GIC_SH_POL_95_64_OFS 0x010c +#define GIC_SH_POL_127_96_OFS 0x0108 +#define GIC_SH_POL_159_128_OFS 0x0114 +#define GIC_SH_POL_191_160_OFS 0x0110 +#define GIC_SH_POL_223_192_OFS 0x011c +#define GIC_SH_POL_255_224_OFS 0x0118 + +/* Edge/Level Triggering */ +#define GIC_SH_TRIG_31_0_OFS 0x0184 +#define GIC_SH_TRIG_63_32_OFS 0x0180 +#define GIC_SH_TRIG_95_64_OFS 0x018c +#define GIC_SH_TRIG_127_96_OFS 0x0188 +#define GIC_SH_TRIG_159_128_OFS 0x0194 +#define GIC_SH_TRIG_191_160_OFS 0x0190 +#define GIC_SH_TRIG_223_192_OFS 0x019c +#define GIC_SH_TRIG_255_224_OFS 0x0198 + +/* Dual Edge Triggering */ +#define GIC_SH_DUAL_31_0_OFS 0x0204 +#define GIC_SH_DUAL_63_32_OFS 0x0200 +#define GIC_SH_DUAL_95_64_OFS 0x020c +#define GIC_SH_DUAL_127_96_OFS 0x0208 +#define GIC_SH_DUAL_159_128_OFS 0x0214 +#define GIC_SH_DUAL_191_160_OFS 0x0210 +#define GIC_SH_DUAL_223_192_OFS 0x021c +#define GIC_SH_DUAL_255_224_OFS 0x0218 + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Reset Mask - Disables Interrupt */ +#define GIC_SH_RMASK_31_0_OFS 0x0304 +#define GIC_SH_RMASK_63_32_OFS 0x0300 +#define GIC_SH_RMASK_95_64_OFS 0x030c +#define GIC_SH_RMASK_127_96_OFS 0x0308 +#define GIC_SH_RMASK_159_128_OFS 0x0314 +#define GIC_SH_RMASK_191_160_OFS 0x0310 +#define GIC_SH_RMASK_223_192_OFS 0x031c +#define GIC_SH_RMASK_255_224_OFS 0x0318 + +/* Set Mask (WO) - Enables Interrupt */ +#define GIC_SH_SMASK_31_0_OFS 0x0384 +#define GIC_SH_SMASK_63_32_OFS 0x0380 +#define GIC_SH_SMASK_95_64_OFS 0x038c +#define GIC_SH_SMASK_127_96_OFS 0x0388 +#define GIC_SH_SMASK_159_128_OFS 0x0394 +#define GIC_SH_SMASK_191_160_OFS 0x0390 +#define GIC_SH_SMASK_223_192_OFS 0x039c +#define GIC_SH_SMASK_255_224_OFS 0x0398 + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_31_0_OFS 0x0404 +#define GIC_SH_MASK_63_32_OFS 0x0400 +#define GIC_SH_MASK_95_64_OFS 0x040c +#define GIC_SH_MASK_127_96_OFS 0x0408 +#define GIC_SH_MASK_159_128_OFS 0x0414 +#define GIC_SH_MASK_191_160_OFS 0x0410 +#define GIC_SH_MASK_223_192_OFS 0x041c +#define GIC_SH_MASK_255_224_OFS 0x0418 + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_31_0_OFS 0x0484 +#define GIC_SH_PEND_63_32_OFS 0x0480 +#define GIC_SH_PEND_95_64_OFS 0x048c +#define GIC_SH_PEND_127_96_OFS 0x0488 +#define GIC_SH_PEND_159_128_OFS 0x0494 +#define GIC_SH_PEND_191_160_OFS 0x0490 +#define GIC_SH_PEND_223_192_OFS 0x049c +#define GIC_SH_PEND_255_224_OFS 0x0498 + +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_MAP_TO_PIN(intr) \ + (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr)) + +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2004 + +/* + * Maps Interrupt X to a VPE. This is more complex than the LE case, as + * odd and even registers need to be transposed. It does work - trust me! 
+ */ +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + \ + (((((vpe) / 32) ^ 1) - 1) * 4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Polarity */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 +#define GIC_SET_POLARITY(intr, pol) \ + GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (pol) << ((intr) % 32)) + +/* Triggering */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 +#define GIC_SET_TRIGGER(intr, trig) \ + GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (trig) << ((intr) % 32)) + +/* Mask manipulation */ +#define GIC_SH_SMASK_OFS 0x0380 +#define GIC_SET_INTR_MASK(intr, val) \ + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32))) + +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_CLR_INTR_MASK(intr, val) \ + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a4 +#define GIC_VPE_COMPARE_HI_OFS 0x00a0 + +#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 +#define GIC_VPE_EIC_SS(intr) \ + (GIC_EIC_SHADOW_SET_BASE + (4 * intr)) + +#define GIC_VPE_EIC_VEC_BASE 0x0800 +#define GIC_VPE_EIC_VEC(intr) \ + (GIC_VPE_EIC_VEC_BASE + (4 * intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0004 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0000 + +#endif /* !LE */ + +/* Masks */ +#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 +#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) + +#define GIC_SH_CONFIG_COUNTBITS_SHF 24 +#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) + +#define GIC_SH_CONFIG_NUMINTRS_SHF 16 +#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) + +#define GIC_SH_CONFIG_NUMVPES_SHF 0 +#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) + +#define GIC_SH_WEDGE_SET(intr) (intr | (0x1 << 31)) +#define GIC_SH_WEDGE_CLR(intr) (intr & ~(0x1 << 31)) + +#define GIC_MAP_TO_PIN_SHF 31 +#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) +#define GIC_MAP_TO_NMI_SHF 30 +#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) +#define GIC_MAP_TO_YQ_SHF 29 +#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) +#define GIC_MAP_SHF 0 +#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) + +/* GIC_VPE_CTL Masks */ +#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 +#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) +#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 +#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) +#define GIC_VPE_CTL_EIC_MODE_SHF 0 +#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << 
GIC_VPE_CTL_EIC_MODE_SHF) + +/* GIC_VPE_PEND Masks */ +#define GIC_VPE_PEND_WD_SHF 0 +#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) +#define GIC_VPE_PEND_CMP_SHF 1 +#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) +#define GIC_VPE_PEND_TIMER_SHF 2 +#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) +#define GIC_VPE_PEND_PERFCOUNT_SHF 3 +#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) +#define GIC_VPE_PEND_SWINT0_SHF 4 +#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) +#define GIC_VPE_PEND_SWINT1_SHF 5 +#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) + +/* GIC_VPE_RMASK Masks */ +#define GIC_VPE_RMASK_WD_SHF 0 +#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) +#define GIC_VPE_RMASK_CMP_SHF 1 +#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) +#define GIC_VPE_RMASK_TIMER_SHF 2 +#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) +#define GIC_VPE_RMASK_PERFCNT_SHF 3 +#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) +#define GIC_VPE_RMASK_SWINT0_SHF 4 +#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) +#define GIC_VPE_RMASK_SWINT1_SHF 5 +#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) + +/* GIC_VPE_SMASK Masks */ +#define GIC_VPE_SMASK_WD_SHF 0 +#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) +#define GIC_VPE_SMASK_CMP_SHF 1 +#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) +#define GIC_VPE_SMASK_TIMER_SHF 2 +#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) +#define GIC_VPE_SMASK_PERFCNT_SHF 3 +#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) +#define GIC_VPE_SMASK_SWINT0_SHF 4 +#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) +#define GIC_VPE_SMASK_SWINT1_SHF 5 +#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) + +/* + * Set the Mapping of Interrupt X to a VPE. + */ +#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \ + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \ + GIC_SH_MAP_TO_VPE_REG_BIT(vpe)) + +struct gic_pcpu_mask { + DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS); +}; + +struct gic_pending_regs { + DECLARE_BITMAP(pending, GIC_NUM_INTRS); +}; + +struct gic_intrmask_regs { + DECLARE_BITMAP(intrmask, GIC_NUM_INTRS); +}; + +/* + * Interrupt Meta-data specification. The ipiflag helps + * in building ipi_map. + */ +struct gic_intr_map { + unsigned int intrnum; /* Ext Intr Num */ + unsigned int cpunum; /* Directed to this CPU */ + unsigned int pin; /* Directed to this Pin */ + unsigned int polarity; /* Polarity : +/- */ + unsigned int trigtype; /* Trigger : Edge/Levl */ + unsigned int ipiflag; /* Is used for IPI ? */ +}; + +extern void gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, + unsigned int intrmap_size, unsigned int irqbase); + +extern unsigned int gic_get_int(void); +extern void gic_send_ipi(unsigned int intr); + +#endif /* _ASM_GICREGS_H */ diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h index e62058b0d28..f18d2816cbe 100644 --- a/include/asm-mips/io.h +++ b/include/asm-mips/io.h @@ -273,7 +273,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, * memory-like regions on I/O busses. 
*/ #define ioremap_cachable(offset, size) \ - __ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT) + __ioremap_mode((offset), (size), _page_cachable_default) /* * These two are MIPS specific ioremap variant. ioremap_cacheable_cow diff --git a/include/asm-mips/jmr3927/jmr3927.h b/include/asm-mips/jmr3927/jmr3927.h index 81602c8047e..a162268f17d 100644 --- a/include/asm-mips/jmr3927/jmr3927.h +++ b/include/asm-mips/jmr3927/jmr3927.h @@ -99,8 +99,8 @@ #define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR) /* DIPSW4 macro */ -#define jmr3927_dipsw1() ((tx3927_pioptr->din & (1 << 11)) == 0) -#define jmr3927_dipsw2() ((tx3927_pioptr->din & (1 << 10)) == 0) +#define jmr3927_dipsw1() (gpio_get_value(11) == 0) +#define jmr3927_dipsw2() (gpio_get_value(10) == 0) #define jmr3927_dipsw3() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 2) == 0) #define jmr3927_dipsw4() ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 1) == 0) diff --git a/include/asm-mips/jmr3927/tx3927.h b/include/asm-mips/jmr3927/tx3927.h index 338f99882a3..fb580333c10 100644 --- a/include/asm-mips/jmr3927/tx3927.h +++ b/include/asm-mips/jmr3927/tx3927.h @@ -314,6 +314,6 @@ struct tx3927_ccfg_reg { #define tx3927_ccfgptr ((struct tx3927_ccfg_reg *)TX3927_CCFG_REG) #define tx3927_tmrptr(ch) ((struct txx927_tmr_reg *)TX3927_TMR_REG(ch)) #define tx3927_sioptr(ch) ((struct txx927_sio_reg *)TX3927_SIO_REG(ch)) -#define tx3927_pioptr ((struct txx927_pio_reg *)TX3927_PIO_REG) +#define tx3927_pioptr ((struct txx9_pio_reg __iomem *)TX3927_PIO_REG) #endif /* __ASM_TX3927_H */ diff --git a/include/asm-mips/jmr3927/txx927.h b/include/asm-mips/jmr3927/txx927.h index 0474fe8dac3..25dcf2feb09 100644 --- a/include/asm-mips/jmr3927/txx927.h +++ b/include/asm-mips/jmr3927/txx927.h @@ -22,18 +22,6 @@ struct txx927_sio_reg { volatile unsigned long rfifo; }; -struct txx927_pio_reg { - volatile unsigned long dout; - volatile unsigned long din; - volatile unsigned long dir; - volatile unsigned long od; - volatile unsigned long flag[2]; - volatile unsigned long pol; - volatile unsigned long intc; - volatile unsigned long maskcpu; - volatile unsigned long maskext; -}; - /* * SIO */ diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h index 5bb57bf2b9d..a05555165d0 100644 --- a/include/asm-mips/mach-au1x00/au1000.h +++ b/include/asm-mips/mach-au1x00/au1000.h @@ -3,9 +3,8 @@ * BRIEF MODULE DESCRIPTION * Include file for Alchemy Semiconductor's Au1k CPU. * - * Copyright 2000,2001 MontaVista Software Inc. - * Author: MontaVista Software, Inc. - * ppopov@mvista.com or source@mvista.com + * Copyright 2000-2001, 2006-2008 MontaVista Software Inc. + * Author: MontaVista Software, Inc. 
<source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -117,13 +116,6 @@ extern struct au1xxx_irqmap au1xxx_irq_map[]; #endif /* !defined (_LANGUAGE_ASSEMBLY) */ -#ifdef CONFIG_PM -/* no CP0 timer irq */ -#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4) -#else -#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) -#endif - /* * SDRAM Register Offsets */ @@ -1693,20 +1685,6 @@ enum soc_au1200_ints { #define IOMEM_RESOURCE_START 0x10000000 #define IOMEM_RESOURCE_END 0xffffffff - /* - * Borrowed from the PPC arch: - * The following macro is used to lookup irqs in a standard table - * format for those PPC systems that do not already have PCI - * interrupts properly routed. - */ - /* FIXME - double check this from asm-ppc/pci-bridge.h */ -#define PCI_IRQ_TABLE_LOOKUP \ - ({ long _ctl_ = -1; \ - if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ - _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ - _ctl_; }) - - #else /* Au1000 and Au1100 and Au1200 */ /* don't allow any legacy ports probing */ diff --git a/include/asm-mips/mach-db1x00/db1200.h b/include/asm-mips/mach-db1x00/db1200.h index d2e28e64932..eedd048a726 100644 --- a/include/asm-mips/mach-db1x00/db1200.h +++ b/include/asm-mips/mach-db1x00/db1200.h @@ -169,15 +169,15 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR; #define BCSR_INT_SD0INSERT 0x1000 #define BCSR_INT_SD0EJECT 0x2000 -#define AU1XXX_SMC91111_PHYS_ADDR (0x19000300) -#define AU1XXX_SMC91111_IRQ DB1200_ETH_INT - -#define AU1XXX_ATA_PHYS_ADDR (0x18800000) -#define AU1XXX_ATA_REG_OFFSET (5) -#define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET) -#define AU1XXX_ATA_INT DB1200_IDE_INT -#define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; -#define AU1XXX_ATA_RQSIZE 128 +#define SMC91C111_PHYS_ADDR 0x19000300 +#define SMC91C111_INT DB1200_ETH_INT + +#define IDE_PHYS_ADDR 0x18800000 +#define IDE_REG_SHIFT 5 +#define IDE_PHYS_LEN (16 << IDE_REG_SHIFT) +#define IDE_INT DB1200_IDE_INT +#define IDE_DDMA_REQ DSCR_CMD0_DMA_REQ1 +#define IDE_RQSIZE 128 #define NAND_PHYS_ADDR 0x20000000 diff --git a/include/asm-mips/mach-generic/gpio.h b/include/asm-mips/mach-generic/gpio.h index 6eaf5efedf3..e6b376bd9d0 100644 --- a/include/asm-mips/mach-generic/gpio.h +++ b/include/asm-mips/mach-generic/gpio.h @@ -1,12 +1,18 @@ #ifndef __ASM_MACH_GENERIC_GPIO_H #define __ASM_MACH_GENERIC_GPIO_H +#ifdef CONFIG_HAVE_GPIO_LIB +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#else int gpio_request(unsigned gpio, const char *label); void gpio_free(unsigned gpio); int gpio_direction_input(unsigned gpio); int gpio_direction_output(unsigned gpio, int value); int gpio_get_value(unsigned gpio); void gpio_set_value(unsigned gpio, int value); +#endif int gpio_to_irq(unsigned gpio); int irq_to_gpio(unsigned irq); diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h index 372291f53fb..7785bec732f 100644 --- a/include/asm-mips/mach-ip27/topology.h +++ b/include/asm-mips/mach-ip27/topology.h @@ -54,4 +54,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; .nr_balance_failed = 0, \ } +#include <asm-generic/topology.h> + #endif /* _ASM_MACH_TOPOLOGY_H */ diff --git a/include/asm-mips/mach-pb1x00/pb1200.h b/include/asm-mips/mach-pb1x00/pb1200.h index edaa489b58f..e2c6bcac3b4 100644 --- 
a/include/asm-mips/mach-pb1x00/pb1200.h +++ b/include/asm-mips/mach-pb1x00/pb1200.h @@ -182,15 +182,15 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR; #define SET_VCC_VPP(VCC, VPP, SLOT)\ ((((VCC)<<2) | ((VPP)<<0)) << ((SLOT)*8)) -#define AU1XXX_SMC91111_PHYS_ADDR (0x0D000300) -#define AU1XXX_SMC91111_IRQ PB1200_ETH_INT - -#define AU1XXX_ATA_PHYS_ADDR (0x0C800000) -#define AU1XXX_ATA_REG_OFFSET (5) -#define AU1XXX_ATA_PHYS_LEN (16 << AU1XXX_ATA_REG_OFFSET) -#define AU1XXX_ATA_INT PB1200_IDE_INT -#define AU1XXX_ATA_DDMA_REQ DSCR_CMD0_DMA_REQ1; -#define AU1XXX_ATA_RQSIZE 128 +#define SMC91C111_PHYS_ADDR 0x0D000300 +#define SMC91C111_INT PB1200_ETH_INT + +#define IDE_PHYS_ADDR 0x0C800000 +#define IDE_REG_SHIFT 5 +#define IDE_PHYS_LEN (16 << IDE_REG_SHIFT) +#define IDE_INT PB1200_IDE_INT +#define IDE_DDMA_REQ DSCR_CMD0_DMA_REQ1 +#define IDE_RQSIZE 128 #define NAND_PHYS_ADDR 0x1C000000 diff --git a/include/asm-mips/mips-boards/generic.h b/include/asm-mips/mips-boards/generic.h index 1c39d339521..33407bee4e7 100644 --- a/include/asm-mips/mips-boards/generic.h +++ b/include/asm-mips/mips-boards/generic.h @@ -68,6 +68,7 @@ #define MIPS_REVISION_CORID_CORE_FPGA3 9 #define MIPS_REVISION_CORID_CORE_24K 10 #define MIPS_REVISION_CORID_CORE_FPGA4 11 +#define MIPS_REVISION_CORID_CORE_FPGA5 12 /**** Artificial corid defines ****/ /* diff --git a/include/asm-mips/mips-boards/launch.h b/include/asm-mips/mips-boards/launch.h new file mode 100644 index 00000000000..d8ae7f95a52 --- /dev/null +++ b/include/asm-mips/mips-boards/launch.h @@ -0,0 +1,35 @@ +/* + * + */ + +#ifndef _ASSEMBLER_ + +struct cpulaunch { + unsigned long pc; + unsigned long gp; + unsigned long sp; + unsigned long a0; + unsigned long _pad[3]; /* pad to cache line size to avoid thrashing */ + unsigned long flags; +}; + +#else + +#define LOG2CPULAUNCH 5 +#define LAUNCH_PC 0 +#define LAUNCH_GP 4 +#define LAUNCH_SP 8 +#define LAUNCH_A0 12 +#define LAUNCH_FLAGS 28 + +#endif + +#define LAUNCH_FREADY 1 +#define LAUNCH_FGO 2 +#define LAUNCH_FGONE 4 + +#define CPULAUNCH 0x00000f00 +#define NCPULAUNCH 8 + +/* Polling period in count cycles for secondary CPU's */ +#define LAUNCHPERIOD 10000 diff --git a/include/asm-mips/mips-boards/malta.h b/include/asm-mips/mips-boards/malta.h index 93bf4e51b8a..c1891578fa6 100644 --- a/include/asm-mips/mips-boards/malta.h +++ b/include/asm-mips/mips-boards/malta.h @@ -52,6 +52,29 @@ static inline unsigned long get_msc_port_base(unsigned long reg) } /* + * GCMP Specific definitions + */ +#define GCMP_BASE_ADDR 0x1fbf8000 +#define GCMP_ADDRSPACE_SZ (256 * 1024) + +/* + * GIC Specific definitions + */ +#define GIC_BASE_ADDR 0x1bdc0000 +#define GIC_ADDRSPACE_SZ (128 * 1024) + +/* + * MSC01 BIU Specific definitions + * FIXME : These should be elsewhere ? + */ +#define MSC01_BIU_REG_BASE 0x1bc80000 +#define MSC01_BIU_ADDRSPACE_SZ (256 * 1024) +#define MSC01_SC_CFG_OFS 0x0110 +#define MSC01_SC_CFG_GICPRES_MSK 0x00000004 +#define MSC01_SC_CFG_GICPRES_SHF 2 +#define MSC01_SC_CFG_GICENA_SHF 3 + +/* * Malta RTC-device indirect register access. 
*/ #define MALTA_RTC_ADR_REG 0x70 diff --git a/include/asm-mips/mips-boards/maltaint.h b/include/asm-mips/mips-boards/maltaint.h index 7461318f1cd..cea872fc6f5 100644 --- a/include/asm-mips/mips-boards/maltaint.h +++ b/include/asm-mips/mips-boards/maltaint.h @@ -39,7 +39,9 @@ #define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0 #define MIPSCPU_INT_MB1 3 #define MIPSCPU_INT_SMI MIPSCPU_INT_MB1 +#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */ #define MIPSCPU_INT_MB2 4 +#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */ #define MIPSCPU_INT_MB3 5 #define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3 #define MIPSCPU_INT_MB4 6 @@ -76,6 +78,31 @@ #define MSC01E_INT_PERFCTR 10 #define MSC01E_INT_CPUCTR 11 +/* GIC's Nomenclature for Core Interrupt Pins on the Malta */ +#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ +#define GIC_CPU_INT1 1 /* . */ +#define GIC_CPU_INT2 2 /* . */ +#define GIC_CPU_INT3 3 /* . */ +#define GIC_CPU_INT4 4 /* . */ +#define GIC_CPU_INT5 5 /* Core Interrupt 5 */ + +#define GIC_EXT_INTR(x) x + +/* Dummy data */ +#define X 0xdead + +/* External Interrupts used for IPI */ +#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 +#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17 +#define GIC_IPI_EXT_INTR_RESCHED_VPE1 18 +#define GIC_IPI_EXT_INTR_CALLFNC_VPE1 19 +#define GIC_IPI_EXT_INTR_RESCHED_VPE2 20 +#define GIC_IPI_EXT_INTR_CALLFNC_VPE2 21 +#define GIC_IPI_EXT_INTR_RESCHED_VPE3 22 +#define GIC_IPI_EXT_INTR_CALLFNC_VPE3 23 + +#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) + #ifndef __ASSEMBLY__ extern void maltaint_init(void); #endif diff --git a/include/asm-mips/mips-boards/maltasmp.h b/include/asm-mips/mips-boards/maltasmp.h new file mode 100644 index 00000000000..8d7e955d506 --- /dev/null +++ b/include/asm-mips/mips-boards/maltasmp.h @@ -0,0 +1,36 @@ +/* + * There are several SMP models supported + * SMTC is mutually exclusive to other options (atm) + */ +#if defined(CONFIG_MIPS_MT_SMTC) +#define malta_smtc 1 +#define malta_cmp 0 +#define malta_smvp 0 +#else +#define malta_smtc 0 +#if defined(CONFIG_MIPS_CMP) +extern int gcmp_present; +#define malta_cmp gcmp_present +#else +#define malta_cmp 0 +#endif +/* FIXME: should become COMFIG_MIPS_MT_SMVP */ +#if defined(CONFIG_MIPS_MT_SMP) +#define malta_smvp 1 +#else +#define malta_smvp 0 +#endif +#endif + +#include <asm/mipsregs.h> +#include <asm/mipsmtregs.h> + +/* malta_smtc */ +#include <asm/smtc.h> +#include <asm/smtc_ipi.h> + +/* malta_cmp */ +#include <asm/cmp.h> + +/* malta_smvp */ +#include <asm/smvp.h> diff --git a/include/asm-mips/mipsmtregs.h b/include/asm-mips/mipsmtregs.h index 5a2f8a3a6a1..c9420aa97e3 100644 --- a/include/asm-mips/mipsmtregs.h +++ b/include/asm-mips/mipsmtregs.h @@ -197,8 +197,8 @@ static inline void __raw_evpe(void) " .set pop \n"); } -/* Enable multiMT if previous suggested it should be. - EMT_ENABLE to force */ +/* Enable virtual processor execution if previous suggested it should be. + EVPE_ENABLE to force */ #define EVPE_ENABLE MVPCONTROL_EVP @@ -238,8 +238,8 @@ static inline void __raw_emt(void) " .set reorder"); } -/* enable multiVPE if previous suggested it should be. - EVPE_ENABLE to force */ +/* enable multi-threaded execution if previous suggested it should be. 
+ EMT_ENABLE to force */ #define EMT_ENABLE VPECONTROL_TE diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index ceefe027c76..4396e9ffd41 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h @@ -107,7 +107,7 @@ static inline void pmd_clear(pmd_t *pmdp) pmd_val(*pmdp) = ((unsigned long) invalid_pte_table); } -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) +#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) #define pte_page(x) pfn_to_page(pte_pfn(x)) #define pte_pfn(x) ((unsigned long)((x).pte_high >> 6)) static inline pte_t @@ -130,7 +130,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) #endif -#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) */ +#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */ #define __pgd_offset(address) pgd_index(address) #define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) diff --git a/include/asm-mips/pgtable-bits.h b/include/asm-mips/pgtable-bits.h index 7494ba91112..60e2f9338fc 100644 --- a/include/asm-mips/pgtable-bits.h +++ b/include/asm-mips/pgtable-bits.h @@ -32,14 +32,14 @@ * unpredictable things. The code (when it is written) to deal with * this problem will be in the update_mmu_cache() code for the r4k. */ -#if defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR) +#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) #define _PAGE_PRESENT (1<<6) /* implemented in software */ #define _PAGE_READ (1<<7) /* implemented in software */ #define _PAGE_WRITE (1<<8) /* implemented in software */ #define _PAGE_ACCESSED (1<<9) /* implemented in software */ #define _PAGE_MODIFIED (1<<10) /* implemented in software */ -#define _PAGE_FILE (1<<10) /* set:pagecache unset:swap */ +#define _PAGE_FILE (1<<10) /* set:pagecache unset:swap */ #define _PAGE_R4KBUG (1<<0) /* workaround for r4k bug */ #define _PAGE_GLOBAL (1<<0) @@ -47,15 +47,9 @@ #define _PAGE_SILENT_READ (1<<1) /* synonym */ #define _PAGE_DIRTY (1<<2) /* The MIPS dirty bit */ #define _PAGE_SILENT_WRITE (1<<2) +#define _CACHE_SHIFT 3 #define _CACHE_MASK (7<<3) -/* MIPS32 defines only values 2 and 3. The rest are implementation - * dependent. - */ -#define _CACHE_UNCACHED (2<<3) -#define _CACHE_CACHABLE_NONCOHERENT (3<<3) -#define _CACHE_CACHABLE_COW (3<<3) /* Au1x */ - #else #define _PAGE_PRESENT (1<<0) /* implemented in software */ @@ -74,75 +68,72 @@ #define _PAGE_SILENT_WRITE (1<<10) #define _CACHE_UNCACHED (1<<11) #define _CACHE_MASK (1<<11) -#define _CACHE_CACHABLE_NONCOHERENT 0 #else + #define _PAGE_R4KBUG (1<<5) /* workaround for r4k bug */ #define _PAGE_GLOBAL (1<<6) #define _PAGE_VALID (1<<7) #define _PAGE_SILENT_READ (1<<7) /* synonym */ #define _PAGE_DIRTY (1<<8) /* The MIPS dirty bit */ #define _PAGE_SILENT_WRITE (1<<8) +#define _CACHE_SHIFT 9 #define _CACHE_MASK (7<<9) -#ifdef CONFIG_CPU_SB1 +#endif +#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */ + + +/* + * Cache attributes + */ +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + +#define _CACHE_CACHABLE_NONCOHERENT 0 + +#elif defined(CONFIG_CPU_SB1) /* No penalty for being coherent on the SB1, so just use it for "noncoherent" spaces, too. Shouldn't hurt. 
*/ -#define _CACHE_UNCACHED (2<<9) -#define _CACHE_CACHABLE_COW (5<<9) -#define _CACHE_CACHABLE_NONCOHERENT (5<<9) -#define _CACHE_UNCACHED_ACCELERATED (7<<9) +#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) +#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) +#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) +#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) #elif defined(CONFIG_CPU_RM9000) -#define _CACHE_WT (0 << 9) -#define _CACHE_WTWA (1 << 9) -#define _CACHE_UC_B (2 << 9) -#define _CACHE_WB (3 << 9) -#define _CACHE_CWBEA (4 << 9) -#define _CACHE_CWB (5 << 9) -#define _CACHE_UCNB (6 << 9) -#define _CACHE_FPC (7 << 9) +#define _CACHE_WT (0<<_CACHE_SHIFT) +#define _CACHE_WTWA (1<<_CACHE_SHIFT) +#define _CACHE_UC_B (2<<_CACHE_SHIFT) +#define _CACHE_WB (3<<_CACHE_SHIFT) +#define _CACHE_CWBEA (4<<_CACHE_SHIFT) +#define _CACHE_CWB (5<<_CACHE_SHIFT) +#define _CACHE_UCNB (6<<_CACHE_SHIFT) +#define _CACHE_FPC (7<<_CACHE_SHIFT) -#define _CACHE_UNCACHED _CACHE_UC_B -#define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB +#define _CACHE_UNCACHED _CACHE_UC_B +#define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB #else -#define _CACHE_CACHABLE_NO_WA (0<<9) /* R4600 only */ -#define _CACHE_CACHABLE_WA (1<<9) /* R4600 only */ -#define _CACHE_UNCACHED (2<<9) /* R4[0246]00 */ -#define _CACHE_CACHABLE_NONCOHERENT (3<<9) /* R4[0246]00 */ -#define _CACHE_CACHABLE_CE (4<<9) /* R4[04]00MC only */ -#define _CACHE_CACHABLE_COW (5<<9) /* R4[04]00MC only */ -#define _CACHE_CACHABLE_CUW (6<<9) /* R4[04]00MC only */ -#define _CACHE_UNCACHED_ACCELERATED (7<<9) /* R10000 only */ +#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */ +#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */ +#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */ +#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */ +#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */ +#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */ +#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */ +#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */ +#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */ #endif -#endif -#endif /* defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR) */ #define __READABLE (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED) #define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) -#ifdef CONFIG_MIPS_UNCACHED -#define PAGE_CACHABLE_DEFAULT _CACHE_UNCACHED -#elif defined(CONFIG_DMA_NONCOHERENT) -#define PAGE_CACHABLE_DEFAULT _CACHE_CACHABLE_NONCOHERENT -#elif defined(CONFIG_CPU_RM9000) -#define PAGE_CACHABLE_DEFAULT _CACHE_CWB -#else -#define PAGE_CACHABLE_DEFAULT _CACHE_CACHABLE_COW -#endif - -#if defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR) -#define CONF_CM_DEFAULT (PAGE_CACHABLE_DEFAULT >> 3) -#else -#define CONF_CM_DEFAULT (PAGE_CACHABLE_DEFAULT >> 9) -#endif +#define CONF_CM_DEFAULT (PAGE_CACHABLE_DEFAULT>>_CACHE_SHIFT) #endif /* _ASM_PGTABLE_BITS_H */ diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index 17a7703a296..2f597eea444 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -23,15 +23,15 @@ struct vm_area_struct; #define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ - PAGE_CACHABLE_DEFAULT) + _page_cachable_default) #define PAGE_COPY __pgprot(_PAGE_PRESENT | 
_PAGE_READ | \ - PAGE_CACHABLE_DEFAULT) + _page_cachable_default) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \ - PAGE_CACHABLE_DEFAULT) + _page_cachable_default) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \ - _PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT) + _PAGE_GLOBAL | _page_cachable_default) #define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ - PAGE_CACHABLE_DEFAULT) + _page_cachable_default) #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \ __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED) @@ -40,23 +40,30 @@ struct vm_area_struct; * read. Also, write permissions imply read permissions. This is the closest * we can get by reasonable means.. */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY -#define __P101 PAGE_READONLY -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY -#define __S101 PAGE_READONLY -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED + +/* + * Dummy values to fill the table in mmap.c + * The real values will be generated at runtime + */ +#define __P000 __pgprot(0) +#define __P001 __pgprot(0) +#define __P010 __pgprot(0) +#define __P011 __pgprot(0) +#define __P100 __pgprot(0) +#define __P101 __pgprot(0) +#define __P110 __pgprot(0) +#define __P111 __pgprot(0) + +#define __S000 __pgprot(0) +#define __S001 __pgprot(0) +#define __S010 __pgprot(0) +#define __S011 __pgprot(0) +#define __S100 __pgprot(0) +#define __S101 __pgprot(0) +#define __S110 __pgprot(0) +#define __S111 __pgprot(0) + +extern unsigned long _page_cachable_default; /* * ZERO_PAGE is a global shared page that is always zero; used @@ -79,7 +86,7 @@ extern void paging_init(void); #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) #define pmd_page_vaddr(pmd) pmd_val(pmd) -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) +#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) #define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL)) #define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT) @@ -182,7 +189,7 @@ extern pgd_t swapper_pg_dir[]; * The following only work if pte_present() is true. * Undefined behaviour if not.. */ -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) +#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; } static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; } static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; } @@ -285,6 +292,8 @@ static inline pte_t pte_mkyoung(pte_t pte) return pte; } #endif +static inline int pte_special(pte_t pte) { return 0; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * Macro to make mark a page protection value as "uncacheable". 
Note @@ -309,7 +318,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot) */ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) -#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) +#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte.pte_low &= _PAGE_CHG_MASK; diff --git a/include/asm-mips/r4k-timer.h b/include/asm-mips/r4k-timer.h new file mode 100644 index 00000000000..a37d12b3b61 --- /dev/null +++ b/include/asm-mips/r4k-timer.h @@ -0,0 +1,30 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_R4K_TYPES_H +#define __ASM_R4K_TYPES_H + +#include <linux/compiler.h> + +#ifdef CONFIG_SYNC_R4K + +extern void synchronise_count_master(void); +extern void synchronise_count_slave(void); + +#else + +static inline void synchronise_count_master(void) +{ +} + +static inline void synchronise_count_slave(void) +{ +} + +#endif + +#endif /* __ASM_R4K_TYPES_H */ diff --git a/include/asm-mips/smp-ops.h b/include/asm-mips/smp-ops.h index b17fdfb5d81..43c207e72a6 100644 --- a/include/asm-mips/smp-ops.h +++ b/include/asm-mips/smp-ops.h @@ -51,6 +51,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) #endif /* !CONFIG_SMP */ extern struct plat_smp_ops up_smp_ops; +extern struct plat_smp_ops cmp_smp_ops; extern struct plat_smp_ops vsmp_smp_ops; #endif /* __ASM_SMP_OPS_H */ diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index ff3e8936b49..3639b28f80d 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h @@ -44,6 +44,7 @@ extern int mipsmt_build_cpu_map(int startslot); extern void mipsmt_prepare_cpus(void); extern void smtc_smp_finish(void); extern void smtc_boot_secondary(int cpu, struct task_struct *t); +extern void smtc_cpus_done(void); /* * Sharing the TLB between multiple VPEs means that the diff --git a/include/asm-mips/smvp.h b/include/asm-mips/smvp.h new file mode 100644 index 00000000000..0d0e80a39e8 --- /dev/null +++ b/include/asm-mips/smvp.h @@ -0,0 +1,19 @@ +#ifndef _ASM_SMVP_H +#define _ASM_SMVP_H + +/* + * Definitions for SMVP multitasking on MIPS MT cores + */ +struct task_struct; + +extern void smvp_smp_setup(void); +extern void smvp_smp_finish(void); +extern void smvp_boot_secondary(int cpu, struct task_struct *t); +extern void smvp_init_secondary(void); +extern void smvp_smp_finish(void); +extern void smvp_cpus_done(void); +extern void smvp_prepare_cpus(unsigned int max_cpus); + +/* This is platform specific */ +extern void smvp_send_ipi(int cpu, unsigned int action); +#endif /* _ASM_SMVP_H */ diff --git a/include/asm-mips/traps.h b/include/asm-mips/traps.h index d02e019b012..e5dbde625ec 100644 --- a/include/asm-mips/traps.h +++ b/include/asm-mips/traps.h @@ -23,5 +23,7 @@ extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup); extern void (*board_nmi_handler_setup)(void); extern void (*board_ejtag_handler_setup)(void); +extern void (*board_bind_eic_interrupt)(int irq, int regset); +extern void (*board_watchpoint_handler)(struct pt_regs *regs); #endif /* _ASM_TRAPS_H */ diff --git a/include/asm-mips/tx4938/rbtx4938.h b/include/asm-mips/tx4938/rbtx4938.h index b180488dcdc..dfed7beb533 100644 --- a/include/asm-mips/tx4938/rbtx4938.h +++ b/include/asm-mips/tx4938/rbtx4938.h @@ -67,44 +67,26 @@ #define 
RBTX4938_INTF_MODEM (1 << RBTX4938_INTB_MODEM) #define RBTX4938_INTF_SWINT (1 << RBTX4938_INTB_SWINT) -#define rbtx4938_fpga_rev_ptr \ - ((volatile unsigned char *)RBTX4938_FPGA_REV_ADDR) -#define rbtx4938_led_ptr \ - ((volatile unsigned char *)RBTX4938_LED_ADDR) -#define rbtx4938_dipsw_ptr \ - ((volatile unsigned char *)RBTX4938_DIPSW_ADDR) -#define rbtx4938_bdipsw_ptr \ - ((volatile unsigned char *)RBTX4938_BDIPSW_ADDR) -#define rbtx4938_imask_ptr \ - ((volatile unsigned char *)RBTX4938_IMASK_ADDR) -#define rbtx4938_imask2_ptr \ - ((volatile unsigned char *)RBTX4938_IMASK2_ADDR) -#define rbtx4938_intpol_ptr \ - ((volatile unsigned char *)RBTX4938_INTPOL_ADDR) -#define rbtx4938_istat_ptr \ - ((volatile unsigned char *)RBTX4938_ISTAT_ADDR) -#define rbtx4938_istat2_ptr \ - ((volatile unsigned char *)RBTX4938_ISTAT2_ADDR) -#define rbtx4938_imstat_ptr \ - ((volatile unsigned char *)RBTX4938_IMSTAT_ADDR) -#define rbtx4938_imstat2_ptr \ - ((volatile unsigned char *)RBTX4938_IMSTAT2_ADDR) -#define rbtx4938_softint_ptr \ - ((volatile unsigned char *)RBTX4938_SOFTINT_ADDR) -#define rbtx4938_piosel_ptr \ - ((volatile unsigned char *)RBTX4938_PIOSEL_ADDR) -#define rbtx4938_spics_ptr \ - ((volatile unsigned char *)RBTX4938_SPICS_ADDR) -#define rbtx4938_sfpwr_ptr \ - ((volatile unsigned char *)RBTX4938_SFPWR_ADDR) -#define rbtx4938_sfvol_ptr \ - ((volatile unsigned char *)RBTX4938_SFVOL_ADDR) -#define rbtx4938_softreset_ptr \ - ((volatile unsigned char *)RBTX4938_SOFTRESET_ADDR) -#define rbtx4938_softresetlock_ptr \ - ((volatile unsigned char *)RBTX4938_SOFTRESETLOCK_ADDR) -#define rbtx4938_pcireset_ptr \ - ((volatile unsigned char *)RBTX4938_PCIRESET_ADDR) +#define rbtx4938_fpga_rev_addr ((__u8 __iomem *)RBTX4938_FPGA_REV_ADDR) +#define rbtx4938_led_addr ((__u8 __iomem *)RBTX4938_LED_ADDR) +#define rbtx4938_dipsw_addr ((__u8 __iomem *)RBTX4938_DIPSW_ADDR) +#define rbtx4938_bdipsw_addr ((__u8 __iomem *)RBTX4938_BDIPSW_ADDR) +#define rbtx4938_imask_addr ((__u8 __iomem *)RBTX4938_IMASK_ADDR) +#define rbtx4938_imask2_addr ((__u8 __iomem *)RBTX4938_IMASK2_ADDR) +#define rbtx4938_intpol_addr ((__u8 __iomem *)RBTX4938_INTPOL_ADDR) +#define rbtx4938_istat_addr ((__u8 __iomem *)RBTX4938_ISTAT_ADDR) +#define rbtx4938_istat2_addr ((__u8 __iomem *)RBTX4938_ISTAT2_ADDR) +#define rbtx4938_imstat_addr ((__u8 __iomem *)RBTX4938_IMSTAT_ADDR) +#define rbtx4938_imstat2_addr ((__u8 __iomem *)RBTX4938_IMSTAT2_ADDR) +#define rbtx4938_softint_addr ((__u8 __iomem *)RBTX4938_SOFTINT_ADDR) +#define rbtx4938_piosel_addr ((__u8 __iomem *)RBTX4938_PIOSEL_ADDR) +#define rbtx4938_spics_addr ((__u8 __iomem *)RBTX4938_SPICS_ADDR) +#define rbtx4938_sfpwr_addr ((__u8 __iomem *)RBTX4938_SFPWR_ADDR) +#define rbtx4938_sfvol_addr ((__u8 __iomem *)RBTX4938_SFVOL_ADDR) +#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR) +#define rbtx4938_softresetlock_addr \ + ((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR) +#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR) /* * IRQ mappings diff --git a/include/asm-mips/tx4938/tx4938.h b/include/asm-mips/tx4938/tx4938.h index f7c448b9057..e8807f5c61e 100644 --- a/include/asm-mips/tx4938/tx4938.h +++ b/include/asm-mips/tx4938/tx4938.h @@ -13,8 +13,6 @@ #ifndef __ASM_TX_BOARDS_TX4938_H #define __ASM_TX_BOARDS_TX4938_H -#include <asm/tx4938/tx4938_mips.h> - #define tx4938_read_nfmc(addr) (*(volatile unsigned int *)(addr)) #define tx4938_write_nfmc(b, addr) (*(volatile unsigned int *)(addr)) = (b) @@ -54,28 +52,6 @@ #define TX4938_ACLC_REG (TX4938_REG_BASE + 0xf700) 
#define TX4938_SPI_REG (TX4938_REG_BASE + 0xf800) -#ifndef _LANGUAGE_ASSEMBLY -#include <asm/byteorder.h> - -#define TX4938_MKA(x) ((u32)( ((u32)(TX4938_REG_BASE)) | ((u32)(x)) )) - -#define TX4938_RD08( reg ) (*(vu08*)(reg)) -#define TX4938_WR08( reg, val ) ((*(vu08*)(reg))=(val)) - -#define TX4938_RD16( reg ) (*(vu16*)(reg)) -#define TX4938_WR16( reg, val ) ((*(vu16*)(reg))=(val)) - -#define TX4938_RD32( reg ) (*(vu32*)(reg)) -#define TX4938_WR32( reg, val ) ((*(vu32*)(reg))=(val)) - -#define TX4938_RD64( reg ) (*(vu64*)(reg)) -#define TX4938_WR64( reg, val ) ((*(vu64*)(reg))=(val)) - -#define TX4938_RD( reg ) TX4938_RD32( reg ) -#define TX4938_WR( reg, val ) TX4938_WR32( reg, val ) - -#endif /* !__ASSEMBLY__ */ - #ifdef __ASSEMBLY__ #define _CONST64(c) c #else @@ -261,18 +237,6 @@ struct tx4938_sio_reg { volatile unsigned long rfifo; }; -struct tx4938_pio_reg { - volatile unsigned long dout; - volatile unsigned long din; - volatile unsigned long dir; - volatile unsigned long od; - volatile unsigned long flag[2]; - volatile unsigned long pol; - volatile unsigned long intc; - volatile unsigned long maskcpu; - volatile unsigned long maskext; -}; - struct tx4938_ndfmc_reg { endian_def_l2(unused0, dtr); endian_def_l2(unused1, mcr); @@ -642,7 +606,7 @@ struct tx4938_ccfg_reg { #define tx4938_pcic1ptr ((struct tx4938_pcic_reg *)TX4938_PCIC1_REG) #define tx4938_ccfgptr ((struct tx4938_ccfg_reg *)TX4938_CCFG_REG) #define tx4938_sioptr(ch) ((struct tx4938_sio_reg *)TX4938_SIO_REG(ch)) -#define tx4938_pioptr ((struct tx4938_pio_reg *)TX4938_PIO_REG) +#define tx4938_pioptr ((struct txx9_pio_reg __iomem *)TX4938_PIO_REG) #define tx4938_aclcptr ((struct tx4938_aclc_reg *)TX4938_ACLC_REG) #define tx4938_spiptr ((struct tx4938_spi_reg *)TX4938_SPI_REG) #define tx4938_sramcptr ((struct tx4938_sramc_reg *)TX4938_SRAMC_REG) diff --git a/include/asm-mips/tx4938/tx4938_mips.h b/include/asm-mips/tx4938/tx4938_mips.h deleted file mode 100644 index f346ff58b94..00000000000 --- a/include/asm-mips/tx4938/tx4938_mips.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * linux/include/asm-mips/tx4938/tx4938_mips.h - * Generic bitmask definitions - * - * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the - * terms of the GNU General Public License version 2. This program is - * licensed "as is" without any warranty of any kind, whether express - * or implied. 
- * - * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com) - */ - -#ifndef TX4938_TX4938_MIPS_H -#define TX4938_TX4938_MIPS_H -#ifndef __ASSEMBLY__ - -#define reg_rd08(r) ((u8 )(*((vu8 *)(r)))) -#define reg_rd16(r) ((u16)(*((vu16*)(r)))) -#define reg_rd32(r) ((u32)(*((vu32*)(r)))) -#define reg_rd64(r) ((u64)(*((vu64*)(r)))) - -#define reg_wr08(r, v) ((*((vu8 *)(r)))=((u8 )(v))) -#define reg_wr16(r, v) ((*((vu16*)(r)))=((u16)(v))) -#define reg_wr32(r, v) ((*((vu32*)(r)))=((u32)(v))) -#define reg_wr64(r, v) ((*((vu64*)(r)))=((u64)(v))) - -typedef volatile __signed char vs8; -typedef volatile unsigned char vu8; - -typedef volatile __signed short vs16; -typedef volatile unsigned short vu16; - -typedef volatile __signed int vs32; -typedef volatile unsigned int vu32; - -typedef s8 s08; -typedef vs8 vs08; - -typedef u8 u08; -typedef vu8 vu08; - -#if (_MIPS_SZLONG == 64) - -typedef volatile __signed__ long vs64; -typedef volatile unsigned long vu64; - -#else - -typedef volatile __signed__ long long vs64; -typedef volatile unsigned long long vu64; - -#endif -#endif -#endif diff --git a/include/asm-mips/txx9pio.h b/include/asm-mips/txx9pio.h new file mode 100644 index 00000000000..3d6fa9f8d51 --- /dev/null +++ b/include/asm-mips/txx9pio.h @@ -0,0 +1,29 @@ +/* + * include/asm-mips/txx9pio.h + * TX39/TX49 PIO controller definitions. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef __ASM_TXX9PIO_H +#define __ASM_TXX9PIO_H + +#include <linux/types.h> + +struct txx9_pio_reg { + __u32 dout; + __u32 din; + __u32 dir; + __u32 od; + __u32 flag[2]; + __u32 pol; + __u32 intc; + __u32 maskcpu; + __u32 maskext; +}; + +int txx9_gpio_init(unsigned long baseaddr, + unsigned int base, unsigned int num); + +#endif /* __ASM_TXX9PIO_H */ diff --git a/include/asm-mips/unaligned.h b/include/asm-mips/unaligned.h index 3249049e93a..79240494857 100644 --- a/include/asm-mips/unaligned.h +++ b/include/asm-mips/unaligned.h @@ -5,25 +5,24 @@ * * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) */ -#ifndef __ASM_GENERIC_UNALIGNED_H -#define __ASM_GENERIC_UNALIGNED_H +#ifndef _ASM_MIPS_UNALIGNED_H +#define _ASM_MIPS_UNALIGNED_H #include <linux/compiler.h> +#if defined(__MIPSEB__) +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#elif defined(__MIPSEL__) +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" +#endif -#define get_unaligned(ptr) \ -({ \ - struct __packed { \ - typeof(*(ptr)) __v; \ - } *__p = (void *) (ptr); \ - __p->__v; \ -}) - -#define put_unaligned(val, ptr) \ -do { \ - struct __packed { \ - typeof(*(ptr)) __v; \ - } *__p = (void *) (ptr); \ - __p->__v = (val); \ -} while(0) - -#endif /* __ASM_GENERIC_UNALIGNED_H */ +#endif /* _ASM_MIPS_UNALIGNED_H */ diff --git a/include/asm-mips/vr41xx/siu.h b/include/asm-mips/vr41xx/siu.h index 98cdb409648..da9f6e37340 100644 --- a/include/asm-mips/vr41xx/siu.h +++ b/include/asm-mips/vr41xx/siu.h @@ -1,7 +1,7 @@ /* * Include file for NEC VR4100 series Serial Interface Unit. 
* - * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2005-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -49,4 +49,10 @@ typedef enum { extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed); +#ifdef CONFIG_SERIAL_VR41XX_CONSOLE +extern void vr41xx_siu_early_setup(struct uart_port *port); +#else +static inline void vr41xx_siu_early_setup(struct uart_port *port) {} +#endif + #endif /* __NEC_VR41XX_SIU_H */ diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h index 88b492f6ea9..22be64971cc 100644 --- a/include/asm-mips/vr41xx/vr41xx.h +++ b/include/asm-mips/vr41xx/vr41xx.h @@ -7,7 +7,7 @@ * Copyright (C) 2001, 2002 Paul Mundt * Copyright (C) 2002 MontaVista Software, Inc. * Copyright (C) 2002 TimeSys Corp. - * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -143,4 +143,10 @@ extern void vr41xx_disable_csiint(uint16_t mask); extern void vr41xx_enable_bcuint(void); extern void vr41xx_disable_bcuint(void); +#ifdef CONFIG_SERIAL_VR41XX_CONSOLE +extern void vr41xx_siu_setup(void); +#else +static inline void vr41xx_siu_setup(void) {} +#endif + #endif /* __NEC_VR41XX_H */ diff --git a/include/asm-mn10300/pgtable.h b/include/asm-mn10300/pgtable.h index 375c4941ded..6dc30fc827c 100644 --- a/include/asm-mn10300/pgtable.h +++ b/include/asm-mn10300/pgtable.h @@ -224,6 +224,7 @@ static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; } +static inline int pte_special(pte_t pte){ return 0; } /* * The following only works if pte_present() is not true. @@ -265,6 +266,8 @@ static inline pte_t pte_mkwrite(pte_t pte) return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + #define pte_ERROR(e) \ printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \ __FILE__, __LINE__, pte_val(e)) diff --git a/include/asm-mn10300/unaligned.h b/include/asm-mn10300/unaligned.h index cad3afbd035..0df671318ae 100644 --- a/include/asm-mn10300/unaligned.h +++ b/include/asm-mn10300/unaligned.h @@ -8,129 +8,13 @@ * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ -#ifndef _ASM_UNALIGNED_H -#define _ASM_UNALIGNED_H +#ifndef _ASM_MN10300_UNALIGNED_H +#define _ASM_MN10300_UNALIGNED_H -#include <asm/types.h> +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#if 0 -extern int __bug_unaligned_x(void *ptr); +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le -/* - * What is the most efficient way of loading/storing an unaligned value? - * - * That is the subject of this file. Efficiency here is defined as - * minimum code size with minimum register usage for the common cases. - * It is currently not believed that long longs are common, so we - * trade efficiency for the chars, shorts and longs against the long - * longs. 
- * - * Current stats with gcc 2.7.2.2 for these functions: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 3 7 3 - * 8 20 6 16 6 - * - * gcc 2.95.1 seems to code differently: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 4 7 4 - * 8 19 8 15 6 - * - * which may or may not be more efficient (depending upon whether - * you can afford the extra registers). Hopefully the gcc 2.95 - * is inteligent enough to decide if it is better to use the - * extra register, but evidence so far seems to suggest otherwise. - * - * Unfortunately, gcc is not able to optimise the high word - * out of long long >> 32, or the low word from long long << 32 - */ - -#define __get_unaligned_2(__p) \ - (__p[0] | __p[1] << 8) - -#define __get_unaligned_4(__p) \ - (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) - -#define get_unaligned(ptr) \ -({ \ - unsigned int __v1, __v2; \ - __typeof__(*(ptr)) __v; \ - __u8 *__p = (__u8 *)(ptr); \ - \ - switch (sizeof(*(ptr))) { \ - case 1: __v = *(ptr); break; \ - case 2: __v = __get_unaligned_2(__p); break; \ - case 4: __v = __get_unaligned_4(__p); break; \ - case 8: \ - __v2 = __get_unaligned_4((__p+4)); \ - __v1 = __get_unaligned_4(__p); \ - __v = ((unsigned long long)__v2 << 32 | __v1); \ - break; \ - default: __v = __bug_unaligned_x(__p); break; \ - } \ - __v; \ -}) - - -static inline void __put_unaligned_2(__u32 __v, register __u8 *__p) -{ - *__p++ = __v; - *__p++ = __v >> 8; -} - -static inline void __put_unaligned_4(__u32 __v, register __u8 *__p) -{ - __put_unaligned_2(__v >> 16, __p + 2); - __put_unaligned_2(__v, __p); -} - -static inline void __put_unaligned_8(const unsigned long long __v, __u8 *__p) -{ - /* - * tradeoff: 8 bytes of stack for all unaligned puts (2 - * instructions), or an extra register in the long long - * case - go for the extra register. - */ - __put_unaligned_4(__v >> 32, __p + 4); - __put_unaligned_4(__v, __p); -} - -/* - * Try to store an unaligned value as efficiently as possible. 
- */ -#define put_unaligned(val, ptr) \ - ({ \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(ptr) = (val); \ - break; \ - case 2: \ - __put_unaligned_2((val), (__u8 *)(ptr)); \ - break; \ - case 4: \ - __put_unaligned_4((val), (__u8 *)(ptr)); \ - break; \ - case 8: \ - __put_unaligned_8((val), (__u8 *)(ptr)); \ - break; \ - default: \ - __bug_unaligned_x(ptr); \ - break; \ - } \ - (void) 0; \ - }) - - -#else - -#define get_unaligned(ptr) (*(ptr)) -#define put_unaligned(val, ptr) ({ *(ptr) = (val); (void) 0; }) - -#endif - -#endif +#endif /* _ASM_MN10300_UNALIGNED_H */ diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h index dc86adbec91..470a4b88124 100644 --- a/include/asm-parisc/pgtable.h +++ b/include/asm-parisc/pgtable.h @@ -323,6 +323,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } @@ -330,6 +331,7 @@ static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; ret static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* * Conversion functions: convert a page and protection to a page entry, diff --git a/include/asm-parisc/unaligned.h b/include/asm-parisc/unaligned.h index 53c905838d9..dfc5d3321a5 100644 --- a/include/asm-parisc/unaligned.h +++ b/include/asm-parisc/unaligned.h @@ -1,7 +1,11 @@ -#ifndef _ASM_PARISC_UNALIGNED_H_ -#define _ASM_PARISC_UNALIGNED_H_ +#ifndef _ASM_PARISC_UNALIGNED_H +#define _ASM_PARISC_UNALIGNED_H -#include <asm-generic/unaligned.h> +#include <linux/unaligned/be_struct.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #ifdef __KERNEL__ struct pt_regs; @@ -9,4 +13,4 @@ void handle_unaligned(struct pt_regs *regs); int check_unaligned(struct pt_regs *regs); #endif -#endif /* _ASM_PARISC_UNALIGNED_H_ */ +#endif /* _ASM_PARISC_UNALIGNED_H */ diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h new file mode 100644 index 00000000000..649c6c3b87b --- /dev/null +++ b/include/asm-powerpc/hugetlb.h @@ -0,0 +1,79 @@ +#ifndef _ASM_POWERPC_HUGETLB_H +#define _ASM_POWERPC_HUGETLB_H + +#include <asm/page.h> + + +int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, + unsigned long len); + +void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling); + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. 
+ */ +static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* _ASM_POWERPC_HUGETLB_H */ diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index b5c03127a9b..5089deb8fec 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h @@ -619,8 +619,6 @@ struct pt_regs; #define __ARCH_HAS_DO_SOFTIRQ -extern void __do_softirq(void); - #ifdef CONFIG_IRQSTACKS /* * Per-cpu stacks for handling hard and soft interrupts. diff --git a/include/asm-powerpc/kvm.h b/include/asm-powerpc/kvm.h index d1b530fbf8d..f993e4198d5 100644 --- a/include/asm-powerpc/kvm.h +++ b/include/asm-powerpc/kvm.h @@ -1,6 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2007 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + #ifndef __LINUX_KVM_POWERPC_H #define __LINUX_KVM_POWERPC_H -/* powerpc does not support KVM */ +#include <asm/types.h> + +struct kvm_regs { + __u64 pc; + __u64 cr; + __u64 ctr; + __u64 lr; + __u64 xer; + __u64 msr; + __u64 srr0; + __u64 srr1; + __u64 pid; + + __u64 sprg0; + __u64 sprg1; + __u64 sprg2; + __u64 sprg3; + __u64 sprg4; + __u64 sprg5; + __u64 sprg6; + __u64 sprg7; + + __u64 gpr[32]; +}; + +struct kvm_sregs { +}; + +struct kvm_fpu { + __u64 fpr[32]; +}; -#endif +#endif /* __LINUX_KVM_POWERPC_H */ diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h new file mode 100644 index 00000000000..2197764796d --- /dev/null +++ b/include/asm-powerpc/kvm_asm.h @@ -0,0 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __POWERPC_KVM_ASM_H__ +#define __POWERPC_KVM_ASM_H__ + +/* IVPR must be 64KiB-aligned. */ +#define VCPU_SIZE_ORDER 4 +#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) +#define VCPU_TLB_PGSZ PPC44x_TLB_64K +#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG) + +#define BOOKE_INTERRUPT_CRITICAL 0 +#define BOOKE_INTERRUPT_MACHINE_CHECK 1 +#define BOOKE_INTERRUPT_DATA_STORAGE 2 +#define BOOKE_INTERRUPT_INST_STORAGE 3 +#define BOOKE_INTERRUPT_EXTERNAL 4 +#define BOOKE_INTERRUPT_ALIGNMENT 5 +#define BOOKE_INTERRUPT_PROGRAM 6 +#define BOOKE_INTERRUPT_FP_UNAVAIL 7 +#define BOOKE_INTERRUPT_SYSCALL 8 +#define BOOKE_INTERRUPT_AP_UNAVAIL 9 +#define BOOKE_INTERRUPT_DECREMENTER 10 +#define BOOKE_INTERRUPT_FIT 11 +#define BOOKE_INTERRUPT_WATCHDOG 12 +#define BOOKE_INTERRUPT_DTLB_MISS 13 +#define BOOKE_INTERRUPT_ITLB_MISS 14 +#define BOOKE_INTERRUPT_DEBUG 15 +#define BOOKE_MAX_INTERRUPT 15 + +#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ +#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ + +#define RESUME_GUEST 0 +#define RESUME_GUEST_NV RESUME_FLAG_NV +#define RESUME_HOST RESUME_FLAG_HOST +#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV) + +#endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h new file mode 100644 index 00000000000..04ffbb8e0a3 --- /dev/null +++ b/include/asm-powerpc/kvm_host.h @@ -0,0 +1,152 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2007 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __POWERPC_KVM_HOST_H__ +#define __POWERPC_KVM_HOST_H__ + +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/kvm_types.h> +#include <asm/kvm_asm.h> + +#define KVM_MAX_VCPUS 1 +#define KVM_MEMORY_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +/* We don't currently support large pages. */ +#define KVM_PAGES_PER_HPAGE (1<<31) + +struct kvm; +struct kvm_run; +struct kvm_vcpu; + +struct kvm_vm_stat { + u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { + u32 sum_exits; + u32 mmio_exits; + u32 dcr_exits; + u32 signal_exits; + u32 light_exits; + /* Account for special types of light exits: */ + u32 itlb_real_miss_exits; + u32 itlb_virt_miss_exits; + u32 dtlb_real_miss_exits; + u32 dtlb_virt_miss_exits; + u32 syscall_exits; + u32 isi_exits; + u32 dsi_exits; + u32 emulated_inst_exits; + u32 dec_exits; + u32 ext_intr_exits; +}; + +struct tlbe { + u32 tid; /* Only the low 8 bits are used. 
*/ + u32 word0; + u32 word1; + u32 word2; +}; + +struct kvm_arch { +}; + +struct kvm_vcpu_arch { + /* Unmodified copy of the guest's TLB. */ + struct tlbe guest_tlb[PPC44x_TLB_SIZE]; + /* TLB that's actually used when the guest is running. */ + struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; + /* Pages which are referenced in the shadow TLB. */ + struct page *shadow_pages[PPC44x_TLB_SIZE]; + /* Copy of the host's TLB. */ + struct tlbe host_tlb[PPC44x_TLB_SIZE]; + + u32 host_stack; + u32 host_pid; + + u64 fpr[32]; + u32 gpr[32]; + + u32 pc; + u32 cr; + u32 ctr; + u32 lr; + u32 xer; + + u32 msr; + u32 mmucr; + u32 sprg0; + u32 sprg1; + u32 sprg2; + u32 sprg3; + u32 sprg4; + u32 sprg5; + u32 sprg6; + u32 sprg7; + u32 srr0; + u32 srr1; + u32 csrr0; + u32 csrr1; + u32 dsrr0; + u32 dsrr1; + u32 dear; + u32 esr; + u32 dec; + u32 decar; + u32 tbl; + u32 tbu; + u32 tcr; + u32 tsr; + u32 ivor[16]; + u32 ivpr; + u32 pir; + u32 pid; + u32 pvr; + u32 ccr0; + u32 ccr1; + u32 dbcr0; + u32 dbcr1; + + u32 last_inst; + u32 fault_dear; + u32 fault_esr; + gpa_t paddr_accessed; + + u8 io_gpr; /* GPR used as IO source/target */ + u8 mmio_is_bigendian; + u8 dcr_needed; + u8 dcr_is_write; + + u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ + + struct timer_list dec_timer; + unsigned long pending_exceptions; +}; + +struct kvm_guest_debug { + int enabled; + unsigned long bp[4]; + int singlestep; +}; + +#endif /* __POWERPC_KVM_HOST_H__ */ diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h new file mode 100644 index 00000000000..2d48f6a63d0 --- /dev/null +++ b/include/asm-powerpc/kvm_para.h @@ -0,0 +1,37 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __POWERPC_KVM_PARA_H__ +#define __POWERPC_KVM_PARA_H__ + +#ifdef __KERNEL__ + +static inline int kvm_para_available(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +#endif /* __KERNEL__ */ + +#endif /* __POWERPC_KVM_PARA_H__ */ diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h new file mode 100644 index 00000000000..7ac820308a7 --- /dev/null +++ b/include/asm-powerpc/kvm_ppc.h @@ -0,0 +1,88 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright IBM Corp. 2008 + * + * Authors: Hollis Blanchard <hollisb@us.ibm.com> + */ + +#ifndef __POWERPC_KVM_PPC_H__ +#define __POWERPC_KVM_PPC_H__ + +/* This file exists just so we can dereference kvm_vcpu, avoiding nested header + * dependencies. */ + +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/kvm_types.h> +#include <linux/kvm_host.h> + +struct kvm_tlb { + struct tlbe guest_tlb[PPC44x_TLB_SIZE]; + struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; +}; + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_DO_DCR, /* kvm_run filled with DCR request */ + EMULATE_FAIL, /* can't emulate this instruction */ +}; + +extern const unsigned char exception_priority[]; +extern const unsigned char priority_exception[]; + +extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); +extern char kvmppc_handlers_start[]; +extern unsigned long kvmppc_handler_len; + +extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); +extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, + int is_bigendian); +extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + u32 val, unsigned int bytes, int is_bigendian); + +extern int kvmppc_emulate_instruction(struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, + u64 asid, u32 flags); +extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid); +extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); + +extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); + +static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) +{ + unsigned int priority = exception_priority[exception]; + set_bit(priority, &vcpu->arch.pending_exceptions); +} + +static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) +{ + unsigned int priority = exception_priority[exception]; + clear_bit(priority, &vcpu->arch.pending_exceptions); +} + +static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) +{ + if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) + kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); + + vcpu->arch.msr = new_msr; +} + +#endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h index c8b02d97f75..a825524c981 100644 --- a/include/asm-powerpc/mmu-44x.h +++ b/include/asm-powerpc/mmu-44x.h @@ -53,6 +53,8 @@ #ifndef __ASSEMBLY__ +extern unsigned int tlb_44x_hwater; + typedef struct { unsigned long id; unsigned long vdso_base; diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h index 67834eae570..25af4fc8daf 100644 --- a/include/asm-powerpc/page_64.h +++ b/include/asm-powerpc/page_64.h @@ -128,11 +128,6 @@ extern void slice_init_context(struct mm_struct *mm, unsigned int psize); extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); #define slice_mm_new_context(mm) ((mm)->context.id == 0) -#define ARCH_HAS_HUGEPAGE_ONLY_RANGE -extern int is_hugepage_only_range(struct mm_struct *m, - unsigned long addr, - unsigned long len); - #endif /* __ASSEMBLY__ */ #else #define slice_init() @@ -146,8 +141,6 @@ do { \ #ifdef CONFIG_HUGETLB_PAGE -#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE -#define ARCH_HAS_SETCLEAR_HUGE_PTE #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif /* !CONFIG_HUGETLB_PAGE */ diff --git 
a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h index daea7692d07..7c97b5a08d0 100644 --- a/include/asm-powerpc/pgtable-ppc32.h +++ b/include/asm-powerpc/pgtable-ppc32.h @@ -504,6 +504,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } @@ -521,6 +522,8 @@ static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { + return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h index dd4c26dc57d..27f18695f7d 100644 --- a/include/asm-powerpc/pgtable-ppc64.h +++ b/include/asm-powerpc/pgtable-ppc64.h @@ -239,6 +239,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} +static inline int pte_special(pte_t pte) { return 0; } static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } @@ -257,6 +258,8 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkhuge(pte_t pte) { return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { + return pte; } /* Atomic PTE updates */ static inline unsigned long pte_update(struct mm_struct *mm, diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h index fd98ca998b4..cf83f2d7e2a 100644 --- a/include/asm-powerpc/processor.h +++ b/include/asm-powerpc/processor.h @@ -138,6 +138,8 @@ typedef struct { struct thread_struct { unsigned long ksp; /* Kernel stack pointer */ + unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ + #ifdef CONFIG_PPC64 unsigned long ksp_vsid; #endif @@ -182,11 +184,14 @@ struct thread_struct { #define ARCH_MIN_TASKALIGN 16 #define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack) +#define INIT_SP_LIMIT \ + (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) #ifdef CONFIG_PPC32 #define INIT_THREAD { \ .ksp = INIT_SP, \ + .ksp_limit = INIT_SP_LIMIT, \ .fs = KERNEL_DS, \ .pgdir = swapper_pg_dir, \ .fpexc_mode = MSR_FE0 | MSR_FE1, \ @@ -194,6 +199,7 @@ struct thread_struct { #else #define INIT_THREAD { \ .ksp = INIT_SP, \ + .ksp_limit = INIT_SP_LIMIT, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .fs = KERNEL_DS, \ .fpr = {0}, \ diff --git a/include/asm-ppc/rio.h b/include/asm-powerpc/rio.h index 0018bf80cb2..0018bf80cb2 100644 --- a/include/asm-ppc/rio.h +++ b/include/asm-powerpc/rio.h diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index fab1674b31b..2b6559a6d11 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -204,7 +204,7 @@ 
extern int powersave_nap; /* set if nap mode can be used in idle loop */ * Changes the memory location '*ptr' to be val and returns * the previous value stored there. */ -static __inline__ unsigned long +static __always_inline unsigned long __xchg_u32(volatile void *p, unsigned long val) { unsigned long prev; @@ -229,7 +229,7 @@ __xchg_u32(volatile void *p, unsigned long val) * Changes the memory location '*ptr' to be val and returns * the previous value stored there. */ -static __inline__ unsigned long +static __always_inline unsigned long __xchg_u32_local(volatile void *p, unsigned long val) { unsigned long prev; @@ -247,7 +247,7 @@ __xchg_u32_local(volatile void *p, unsigned long val) } #ifdef CONFIG_PPC64 -static __inline__ unsigned long +static __always_inline unsigned long __xchg_u64(volatile void *p, unsigned long val) { unsigned long prev; @@ -266,7 +266,7 @@ __xchg_u64(volatile void *p, unsigned long val) return prev; } -static __inline__ unsigned long +static __always_inline unsigned long __xchg_u64_local(volatile void *p, unsigned long val) { unsigned long prev; @@ -290,7 +290,7 @@ __xchg_u64_local(volatile void *p, unsigned long val) */ extern void __xchg_called_with_bad_pointer(void); -static __inline__ unsigned long +static __always_inline unsigned long __xchg(volatile void *ptr, unsigned long x, unsigned int size) { switch (size) { @@ -305,7 +305,7 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size) return x; } -static __inline__ unsigned long +static __always_inline unsigned long __xchg_local(volatile void *ptr, unsigned long x, unsigned int size) { switch (size) { @@ -338,7 +338,7 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size) */ #define __HAVE_ARCH_CMPXCHG 1 -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) { unsigned int prev; @@ -361,7 +361,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) return prev; } -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, unsigned long new) { @@ -384,7 +384,7 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, } #ifdef CONFIG_PPC64 -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) { unsigned long prev; @@ -406,7 +406,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) return prev; } -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, unsigned long new) { @@ -432,7 +432,7 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, if something tries to do an invalid cmpxchg(). 
*/ extern void __cmpxchg_called_with_bad_pointer(void); -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { @@ -448,7 +448,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, return old; } -static __inline__ unsigned long +static __always_inline unsigned long __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h index 6c95dfa2652..5f1b1e3c213 100644 --- a/include/asm-powerpc/unaligned.h +++ b/include/asm-powerpc/unaligned.h @@ -5,15 +5,12 @@ /* * The PowerPC can do unaligned accesses itself in big endian mode. - * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#define get_unaligned(ptr) (*(ptr)) - -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UNALIGNED_H */ diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h index 70435d32129..55f9d38e3bf 100644 --- a/include/asm-ppc/pgtable.h +++ b/include/asm-ppc/pgtable.h @@ -483,6 +483,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } @@ -500,6 +501,8 @@ static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { + return pte; } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild index e92b429d2be..13c9805349f 100644 --- a/include/asm-s390/Kbuild +++ b/include/asm-s390/Kbuild @@ -7,6 +7,7 @@ header-y += tape390.h header-y += ucontext.h header-y += vtoc.h header-y += zcrypt.h +header-y += kvm.h unifdef-y += cmb.h unifdef-y += debug.h diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h index 573f2a35138..d74002f9579 100644 --- a/include/asm-s390/kvm.h +++ b/include/asm-s390/kvm.h @@ -1,6 +1,45 @@ #ifndef __LINUX_KVM_S390_H #define __LINUX_KVM_S390_H -/* s390 does not support KVM */ +/* + * asm-s390/kvm.h - KVM s390 specific structures and definitions + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. 
+ * + * Author(s): Carsten Otte <cotte@de.ibm.com> + * Christian Borntraeger <borntraeger@de.ibm.com> + */ +#include <asm/types.h> + +/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ +struct kvm_pic_state { + /* no PIC for s390 */ +}; + +struct kvm_ioapic_state { + /* no IOAPIC for s390 */ +}; + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + /* general purpose regs for s390 */ + __u64 gprs[16]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { + __u32 acrs[16]; + __u64 crs[16]; +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { + __u32 fpc; + __u64 fprs[16]; +}; #endif diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h new file mode 100644 index 00000000000..f8204a4f2e0 --- /dev/null +++ b/include/asm-s390/kvm_host.h @@ -0,0 +1,234 @@ +/* + * asm-s390/kvm_host.h - definition for kernel virtual machines on s390 + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * Author(s): Carsten Otte <cotte@de.ibm.com> + */ + + +#ifndef ASM_KVM_HOST_H +#define ASM_KVM_HOST_H +#include <linux/kvm_host.h> +#include <asm/debug.h> + +#define KVM_MAX_VCPUS 64 +#define KVM_MEMORY_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +struct kvm_guest_debug { +}; + +struct sca_entry { + atomic_t scn; + __u64 reserved; + __u64 sda; + __u64 reserved2[2]; +} __attribute__((packed)); + + +struct sca_block { + __u64 ipte_control; + __u64 reserved[5]; + __u64 mcn; + __u64 reserved2; + struct sca_entry cpu[64]; +} __attribute__((packed)); + +#define KVM_PAGES_PER_HPAGE 256 + +#define CPUSTAT_HOST 0x80000000 +#define CPUSTAT_WAIT 0x10000000 +#define CPUSTAT_ECALL_PEND 0x08000000 +#define CPUSTAT_STOP_INT 0x04000000 +#define CPUSTAT_IO_INT 0x02000000 +#define CPUSTAT_EXT_INT 0x01000000 +#define CPUSTAT_RUNNING 0x00800000 +#define CPUSTAT_RETAINED 0x00400000 +#define CPUSTAT_TIMING_SUB 0x00020000 +#define CPUSTAT_SIE_SUB 0x00010000 +#define CPUSTAT_RRF 0x00008000 +#define CPUSTAT_SLSV 0x00004000 +#define CPUSTAT_SLSR 0x00002000 +#define CPUSTAT_ZARCH 0x00000800 +#define CPUSTAT_MCDS 0x00000100 +#define CPUSTAT_SM 0x00000080 +#define CPUSTAT_G 0x00000008 +#define CPUSTAT_J 0x00000002 +#define CPUSTAT_P 0x00000001 + +struct sie_block { + atomic_t cpuflags; /* 0x0000 */ + __u32 prefix; /* 0x0004 */ + __u8 reserved8[32]; /* 0x0008 */ + __u64 cputm; /* 0x0028 */ + __u64 ckc; /* 0x0030 */ + __u64 epoch; /* 0x0038 */ + __u8 reserved40[4]; /* 0x0040 */ +#define LCTL_CR0 0x8000 + __u16 lctl; /* 0x0044 */ + __s16 icpua; /* 0x0046 */ + __u32 ictl; /* 0x0048 */ + __u32 eca; /* 0x004c */ + __u8 icptcode; /* 0x0050 */ + __u8 reserved51; /* 0x0051 */ + __u16 ihcpu; /* 0x0052 */ + __u8 reserved54[2]; /* 0x0054 */ + __u16 ipa; /* 0x0056 */ + __u32 ipb; /* 0x0058 */ + __u32 scaoh; /* 0x005c */ + __u8 reserved60; /* 0x0060 */ + __u8 ecb; /* 0x0061 */ + __u8 reserved62[2]; /* 0x0062 */ + __u32 scaol; /* 0x0064 */ + __u8 reserved68[4]; /* 0x0068 */ + __u32 todpr; /* 0x006c */ + __u8 reserved70[16]; /* 0x0070 */ + __u64 gmsor; /* 0x0080 */ + __u64 gmslm; /* 0x0088 */ + psw_t gpsw; /* 0x0090 */ + __u64 gg14; /* 0x00a0 */ + __u64 gg15; /* 0x00a8 */ + __u8 reservedb0[30]; /* 0x00b0 */ + __u16 iprcc; /* 0x00ce */ + __u8 reservedd0[48]; /* 0x00d0 */ + __u64 gcr[16]; /* 0x0100 */ + __u64 gbea; /* 0x0180 */ + __u8 reserved188[120]; /* 0x0188 */ +} 
__attribute__((packed)); + +struct kvm_vcpu_stat { + u32 exit_userspace; + u32 exit_external_request; + u32 exit_external_interrupt; + u32 exit_stop_request; + u32 exit_validity; + u32 exit_instruction; + u32 instruction_lctl; + u32 instruction_lctg; + u32 exit_program_interruption; + u32 exit_instr_and_program; + u32 deliver_emergency_signal; + u32 deliver_service_signal; + u32 deliver_virtio_interrupt; + u32 deliver_stop_signal; + u32 deliver_prefix_signal; + u32 deliver_restart_signal; + u32 deliver_program_int; + u32 exit_wait_state; + u32 instruction_stidp; + u32 instruction_spx; + u32 instruction_stpx; + u32 instruction_stap; + u32 instruction_storage_key; + u32 instruction_stsch; + u32 instruction_chsc; + u32 instruction_stsi; + u32 instruction_stfl; + u32 instruction_sigp_sense; + u32 instruction_sigp_emergency; + u32 instruction_sigp_stop; + u32 instruction_sigp_arch; + u32 instruction_sigp_prefix; + u32 instruction_sigp_restart; + u32 diagnose_44; +}; + +struct io_info { + __u16 subchannel_id; /* 0x0b8 */ + __u16 subchannel_nr; /* 0x0ba */ + __u32 io_int_parm; /* 0x0bc */ + __u32 io_int_word; /* 0x0c0 */ +}; + +struct ext_info { + __u32 ext_params; + __u64 ext_params2; +}; + +#define PGM_OPERATION 0x01 +#define PGM_PRIVILEGED_OPERATION 0x02 +#define PGM_EXECUTE 0x03 +#define PGM_PROTECTION 0x04 +#define PGM_ADDRESSING 0x05 +#define PGM_SPECIFICATION 0x06 +#define PGM_DATA 0x07 + +struct pgm_info { + __u16 code; +}; + +struct prefix_info { + __u32 address; +}; + +struct interrupt_info { + struct list_head list; + u64 type; + union { + struct io_info io; + struct ext_info ext; + struct pgm_info pgm; + struct prefix_info prefix; + }; +}; + +/* for local_interrupt.action_flags */ +#define ACTION_STORE_ON_STOP 1 +#define ACTION_STOP_ON_STOP 2 + +struct local_interrupt { + spinlock_t lock; + struct list_head list; + atomic_t active; + struct float_interrupt *float_int; + int timer_due; /* event indicator for waitqueue below */ + wait_queue_head_t wq; + atomic_t *cpuflags; + unsigned int action_bits; +}; + +struct float_interrupt { + spinlock_t lock; + struct list_head list; + atomic_t active; + int next_rr_cpu; + unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)]; + struct local_interrupt *local_int[64]; +}; + + +struct kvm_vcpu_arch { + struct sie_block *sie_block; + unsigned long guest_gprs[16]; + s390_fp_regs host_fpregs; + unsigned int host_acrs[NUM_ACRS]; + s390_fp_regs guest_fpregs; + unsigned int guest_acrs[NUM_ACRS]; + struct local_interrupt local_int; + struct timer_list ckc_timer; + union { + cpuid_t cpu_id; + u64 stidp_data; + }; +}; + +struct kvm_vm_stat { + u32 remote_tlb_flush; +}; + +struct kvm_arch{ + unsigned long guest_origin; + unsigned long guest_memsize; + struct sca_block *sca; + debug_info_t *dbf; + struct float_interrupt float_int; +}; + +extern int sie64a(struct sie_block *, __u64 *); +#endif diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h new file mode 100644 index 00000000000..2c503796b61 --- /dev/null +++ b/include/asm-s390/kvm_para.h @@ -0,0 +1,150 @@ +/* + * asm-s390/kvm_para.h - definition for paravirtual devices on s390 + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> + */ + +#ifndef __S390_KVM_PARA_H +#define __S390_KVM_PARA_H + +/* + * Hypercalls for KVM on s390. 
The calling convention is similar to the + * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1 + * as hypercall number and R7 as parameter 6. The return value is + * written to R2. We use the diagnose instruction as hypercall. To avoid + * conflicts with existing diagnoses for LPAR and z/VM, we do not use + * the instruction encoded number, but specify the number in R1 and + * use 0x500 as KVM hypercall + * + * Copyright IBM Corp. 2007,2008 + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + */ + +static inline long kvm_hypercall0(unsigned long nr) +{ + register unsigned long __nr asm("1") = nr; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr): "memory", "cc"); + return __rc; +} + +static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc"); + return __rc; +} + +static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, + unsigned long p2) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register unsigned long __p2 asm("3") = p2; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2) + : "memory", "cc"); + return __rc; +} + +static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register unsigned long __p2 asm("3") = p2; + register unsigned long __p3 asm("4") = p3; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2), + "d" (__p3) : "memory", "cc"); + return __rc; +} + + +static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register unsigned long __p2 asm("3") = p2; + register unsigned long __p3 asm("4") = p3; + register unsigned long __p4 asm("5") = p4; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2), + "d" (__p3), "d" (__p4) : "memory", "cc"); + return __rc; +} + +static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register unsigned long __p2 asm("3") = p2; + register unsigned long __p3 asm("4") = p3; + register unsigned long __p4 asm("5") = p4; + register unsigned long __p5 asm("6") = p5; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2), + "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc"); + return __rc; +} + +static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5, + unsigned long p6) +{ + register unsigned long __nr asm("1") = nr; + register unsigned long __p1 asm("2") = p1; + register unsigned long __p2 asm("3") = p2; + register unsigned long __p3 asm("4") = p3; + register unsigned long __p4 
asm("5") = p4; + register unsigned long __p5 asm("6") = p5; + register unsigned long __p6 asm("7") = p6; + register long __rc asm("2"); + + asm volatile ("diag 2,4,0x500\n" + : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2), + "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6) + : "memory", "cc"); + return __rc; +} + +/* kvm on s390 is always paravirtualization enabled */ +static inline int kvm_para_available(void) +{ + return 1; +} + +/* No feature bits are currently assigned for kvm on s390 */ +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +#endif /* __S390_KVM_PARA_H */ diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h new file mode 100644 index 00000000000..5c871a990c2 --- /dev/null +++ b/include/asm-s390/kvm_virtio.h @@ -0,0 +1,53 @@ +/* + * kvm_virtio.h - definition for virtio for kvm on s390 + * + * Copyright IBM Corp. 2008 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2 only) + * as published by the Free Software Foundation. + * + * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> + */ + +#ifndef __KVM_S390_VIRTIO_H +#define __KVM_S390_VIRTIO_H + +#include <linux/types.h> + +struct kvm_device_desc { + /* The device type: console, network, disk etc. Type 0 terminates. */ + __u8 type; + /* The number of virtqueues (first in config array) */ + __u8 num_vq; + /* + * The number of bytes of feature bits. Multiply by 2: one for host + * features and one for guest acknowledgements. + */ + __u8 feature_len; + /* The number of bytes of the config array after virtqueues. */ + __u8 config_len; + /* A status byte, written by the Guest. */ + __u8 status; + __u8 config[0]; +}; + +/* + * This is how we expect the device configuration field for a virtqueue + * to be laid out in config space. + */ +struct kvm_vqconfig { + /* The token returned with an interrupt. 
Set by the guest */ + __u64 token; + /* The address of the virtio ring */ + __u64 address; + /* The number of entries in the virtio_ring */ + __u16 num; + +}; + +#define KVM_S390_VIRTIO_NOTIFY 0 +#define KVM_S390_VIRTIO_RESET 1 +#define KVM_S390_VIRTIO_SET_STATUS 2 + +#endif diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 5de3efb3144..0bc51d52a89 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -381,27 +381,32 @@ struct _lowcore /* whether the kernel died with panic() or not */ __u32 panic_magic; /* 0xe00 */ - __u8 pad13[0x1200-0xe04]; /* 0xe04 */ + __u8 pad13[0x11b8-0xe04]; /* 0xe04 */ + + /* 64 bit extparam used for pfault, diag 250 etc */ + __u64 ext_params2; /* 0x11B8 */ + + __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */ /* System info area */ __u64 floating_pt_save_area[16]; /* 0x1200 */ __u64 gpregs_save_area[16]; /* 0x1280 */ __u32 st_status_fixed_logout[4]; /* 0x1300 */ - __u8 pad14[0x1318-0x1310]; /* 0x1310 */ + __u8 pad15[0x1318-0x1310]; /* 0x1310 */ __u32 prefixreg_save_area; /* 0x1318 */ __u32 fpt_creg_save_area; /* 0x131c */ - __u8 pad15[0x1324-0x1320]; /* 0x1320 */ + __u8 pad16[0x1324-0x1320]; /* 0x1320 */ __u32 tod_progreg_save_area; /* 0x1324 */ __u32 cpu_timer_save_area[2]; /* 0x1328 */ __u32 clock_comp_save_area[2]; /* 0x1330 */ - __u8 pad16[0x1340-0x1338]; /* 0x1338 */ + __u8 pad17[0x1340-0x1338]; /* 0x1338 */ __u32 access_regs_save_area[16]; /* 0x1340 */ __u64 cregs_save_area[16]; /* 0x1380 */ /* align to the top of the prefix area */ - __u8 pad17[0x2000-0x1400]; /* 0x1400 */ + __u8 pad18[0x2000-0x1400]; /* 0x1400 */ #endif /* !__s390x__ */ } __attribute__((packed)); /* End structure*/ diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h index 1698e29c5b2..5dd5e7b3476 100644 --- a/include/asm-s390/mmu.h +++ b/include/asm-s390/mmu.h @@ -7,6 +7,7 @@ typedef struct { unsigned long asce_bits; unsigned long asce_limit; int noexec; + int pgstes; } mm_context_t; #endif diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h index b5a34c6f91a..4c2fbf48c9c 100644 --- a/include/asm-s390/mmu_context.h +++ b/include/asm-s390/mmu_context.h @@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk, #ifdef CONFIG_64BIT mm->context.asce_bits |= _ASCE_TYPE_REGION3; #endif - mm->context.noexec = s390_noexec; + if (current->mm->context.pgstes) { + mm->context.noexec = 0; + mm->context.pgstes = 1; + } else { + mm->context.noexec = s390_noexec; + mm->context.pgstes = 0; + } mm->context.asce_limit = STACK_TOP_MAX; crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 65154dc9a9e..f8347ce9c5a 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -30,6 +30,7 @@ */ #ifndef __ASSEMBLY__ #include <linux/mm_types.h> +#include <asm/bitops.h> #include <asm/bug.h> #include <asm/processor.h> @@ -219,6 +220,8 @@ extern char empty_zero_page[PAGE_SIZE]; /* Software bits in the page table entry */ #define _PAGE_SWT 0x001 /* SW pte type bit t */ #define _PAGE_SWX 0x002 /* SW pte type bit x */ +#define _PAGE_SPECIAL 0x004 /* SW associated with special page */ +#define __HAVE_ARCH_PTE_SPECIAL /* Six different types of pages. */ #define _PAGE_TYPE_EMPTY 0x400 @@ -258,6 +261,13 @@ extern char empty_zero_page[PAGE_SIZE]; * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 
*/ +/* Page status table bits for virtualization */ +#define RCP_PCL_BIT 55 +#define RCP_HR_BIT 54 +#define RCP_HC_BIT 53 +#define RCP_GR_BIT 50 +#define RCP_GC_BIT 49 + #ifndef __s390x__ /* Bits in the segment table address-space-control-element */ @@ -510,9 +520,56 @@ static inline int pte_file(pte_t pte) return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; } +static inline int pte_special(pte_t pte) +{ + return (pte_val(pte) & _PAGE_SPECIAL); +} + #define __HAVE_ARCH_PTE_SAME #define pte_same(a,b) (pte_val(a) == pte_val(b)) +static inline void rcp_lock(pte_t *ptep) +{ +#ifdef CONFIG_PGSTE + unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); + preempt_disable(); + while (test_and_set_bit(RCP_PCL_BIT, pgste)) + ; +#endif +} + +static inline void rcp_unlock(pte_t *ptep) +{ +#ifdef CONFIG_PGSTE + unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); + clear_bit(RCP_PCL_BIT, pgste); + preempt_enable(); +#endif +} + +/* forward declaration for SetPageUptodate in page-flags.h*/ +static inline void page_clear_dirty(struct page *page); +#include <linux/page-flags.h> + +static inline void ptep_rcp_copy(pte_t *ptep) +{ +#ifdef CONFIG_PGSTE + struct page *page = virt_to_page(pte_val(*ptep)); + unsigned int skey; + unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE); + + skey = page_get_storage_key(page_to_phys(page)); + if (skey & _PAGE_CHANGED) + set_bit_simple(RCP_GC_BIT, pgste); + if (skey & _PAGE_REFERENCED) + set_bit_simple(RCP_GR_BIT, pgste); + if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) + SetPageDirty(page); + if (test_and_clear_bit_simple(RCP_HR_BIT, pgste)) + SetPageReferenced(page); +#endif +} + /* * query functions pte_write/pte_dirty/pte_young only work if * pte_present() is true. Undefined behaviour if not.. @@ -599,6 +656,8 @@ static inline void pmd_clear(pmd_t *pmd) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + if (mm->context.pgstes) + ptep_rcp_copy(ptep); pte_val(*ptep) = _PAGE_TYPE_EMPTY; if (mm->context.noexec) pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY; @@ -663,10 +722,34 @@ static inline pte_t pte_mkyoung(pte_t pte) return pte; } +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { +#ifdef CONFIG_PGSTE + unsigned long physpage; + int young; + unsigned long *pgste; + + if (!vma->vm_mm->context.pgstes) + return 0; + physpage = pte_val(*ptep) & PAGE_MASK; + pgste = (unsigned long *) (ptep + PTRS_PER_PTE); + + young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0); + rcp_lock(ptep); + if (young) + set_bit_simple(RCP_GR_BIT, pgste); + young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste); + rcp_unlock(ptep); + return young; +#endif return 0; } @@ -674,7 +757,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, static inline int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - /* No need to flush TLB; bits are in storage key */ + /* No need to flush TLB + * On s390 reference bits are in storage key and never in TLB + * With virtualization we handle the reference bit, without we + * we can simply return */ +#ifdef CONFIG_PGSTE + return ptep_test_and_clear_young(vma, address, ptep); +#endif return 0; } @@ -693,15 +782,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) : "=m" (*ptep) : "m" (*ptep), "a" (pto), 
"a" (address)); } - pte_val(*ptep) = _PAGE_TYPE_EMPTY; } static inline void ptep_invalidate(struct mm_struct *mm, unsigned long address, pte_t *ptep) { + if (mm->context.pgstes) { + rcp_lock(ptep); + __ptep_ipte(address, ptep); + ptep_rcp_copy(ptep); + pte_val(*ptep) = _PAGE_TYPE_EMPTY; + rcp_unlock(ptep); + return; + } __ptep_ipte(address, ptep); - if (mm->context.noexec) + pte_val(*ptep) = _PAGE_TYPE_EMPTY; + if (mm->context.noexec) { __ptep_ipte(address, ptep + PTRS_PER_PTE); + pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY; + } } /* @@ -966,6 +1065,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) extern int add_shared_memory(unsigned long start, unsigned long size); extern int remove_shared_memory(unsigned long start, unsigned long size); +extern int s390_enable_sie(void); /* * No page table caches to initialise diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index a76a6b8fd88..aaf4b518b94 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h @@ -62,6 +62,7 @@ extern unsigned long machine_flags; #define MACHINE_IS_VM (machine_flags & 1) #define MACHINE_IS_P390 (machine_flags & 4) #define MACHINE_HAS_MVPG (machine_flags & 16) +#define MACHINE_IS_KVM (machine_flags & 64) #define MACHINE_HAS_IDTE (machine_flags & 128) #define MACHINE_HAS_DIAG9C (machine_flags & 256) diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h index 8ee86dbedd1..da9627afe5d 100644 --- a/include/asm-s390/unaligned.h +++ b/include/asm-s390/unaligned.h @@ -1,24 +1,13 @@ -/* - * include/asm-s390/unaligned.h - * - * S390 version - * - * Derived from "include/asm-i386/unaligned.h" - */ - -#ifndef __S390_UNALIGNED_H -#define __S390_UNALIGNED_H +#ifndef _ASM_S390_UNALIGNED_H +#define _ASM_S390_UNALIGNED_H /* * The S390 can do unaligned accesses itself. - * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -#define get_unaligned(ptr) (*(ptr)) - -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be -#endif +#endif /* _ASM_S390_UNALIGNED_H */ diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h new file mode 100644 index 00000000000..02402303d89 --- /dev/null +++ b/include/asm-sh/hugetlb.h @@ -0,0 +1,91 @@ +#ifndef _ASM_SH_HUGETLB_H +#define _ASM_SH_HUGETLB_H + +#include <asm/page.h> + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) { + return 0; +} + +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. 
+ */ +static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* _ASM_SH_HUGETLB_H */ diff --git a/include/asm-sh/pgtable_32.h b/include/asm-sh/pgtable_32.h index 3e3557c53c5..cbc731d35c2 100644 --- a/include/asm-sh/pgtable_32.h +++ b/include/asm-sh/pgtable_32.h @@ -326,6 +326,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) #define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY) #define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED) #define pte_file(pte) ((pte).pte_low & _PAGE_FILE) +#define pte_special(pte) (0) #ifdef CONFIG_X2TLB #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) @@ -356,6 +357,8 @@ PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } + /* * Macro and implementation to make a page protection as uncachable. */ diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h index f9dd9d31144..c78990cda55 100644 --- a/include/asm-sh/pgtable_64.h +++ b/include/asm-sh/pgtable_64.h @@ -254,10 +254,11 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd); /* * The following have defined behavior only work if pte_present() is true. 
*/ -static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; } -static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } -static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } +static inline int pte_special(pte_t pte){ return 0; } static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; } static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } @@ -266,6 +267,7 @@ static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } +static inline pte_t pte_mkspecial(pte_t pte) { return pte; } /* diff --git a/include/asm-sh/unaligned.h b/include/asm-sh/unaligned.h index 5250e3063b4..c1641a01d50 100644 --- a/include/asm-sh/unaligned.h +++ b/include/asm-sh/unaligned.h @@ -1,7 +1,19 @@ -#ifndef __ASM_SH_UNALIGNED_H -#define __ASM_SH_UNALIGNED_H +#ifndef _ASM_SH_UNALIGNED_H +#define _ASM_SH_UNALIGNED_H /* SH can't handle unaligned accesses. */ -#include <asm-generic/unaligned.h> +#ifdef __LITTLE_ENDIAN__ +# include <linux/unaligned/le_struct.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# include <linux/unaligned/be_struct.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif -#endif /* __ASM_SH_UNALIGNED_H */ +#endif /* _ASM_SH_UNALIGNED_H */ diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index 2cc235b74d9..d84af6d95f5 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -219,6 +219,11 @@ static inline int pte_file(pte_t pte) return pte_val(pte) & BTFIXUP_HALF(pte_filei); } +static inline int pte_special(pte_t pte) +{ + return 0; +} + /* */ BTFIXUPDEF_HALF(pte_wrprotecti) @@ -251,6 +256,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t) #define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte) #define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte) +#define pte_mkspecial(pte) (pte) + #define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot) BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t) diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h index e3006979709..8898efbbbe0 100644 --- a/include/asm-sparc/processor.h +++ b/include/asm-sparc/processor.h @@ -1,5 +1,4 @@ -/* $Id: processor.h,v 1.83 2001/10/08 09:32:13 davem Exp $ - * include/asm-sparc/processor.h +/* include/asm-sparc/processor.h * * Copyright (C) 1994 David S. 
Miller (davem@caip.rutgers.edu) */ @@ -65,7 +64,6 @@ struct thread_struct { struct fpq fpqueue[16]; unsigned long flags; mm_segment_t current_ds; - int new_signal; }; #define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */ diff --git a/include/asm-sparc/unaligned.h b/include/asm-sparc/unaligned.h index b6f8eddd30a..11d2d5fb590 100644 --- a/include/asm-sparc/unaligned.h +++ b/include/asm-sparc/unaligned.h @@ -1,6 +1,10 @@ -#ifndef _ASM_SPARC_UNALIGNED_H_ -#define _ASM_SPARC_UNALIGNED_H_ +#ifndef _ASM_SPARC_UNALIGNED_H +#define _ASM_SPARC_UNALIGNED_H -#include <asm-generic/unaligned.h> +#include <linux/unaligned/be_struct.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #endif /* _ASM_SPARC_UNALIGNED_H */ diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index c47f58d6c15..ca19f80a9b7 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -293,7 +293,6 @@ static int sun_fd_eject(int drive) #ifdef CONFIG_PCI #include <asm/ebus.h> -#include <asm/isa.h> #include <asm/ns87303.h> static struct ebus_dma_info sun_pci_fd_ebus_dma; @@ -558,82 +557,6 @@ static int __init ebus_fdthree_p(struct linux_ebus_device *edev) } #endif -#ifdef CONFIG_PCI -#undef ISA_FLOPPY_WORKS - -#ifdef ISA_FLOPPY_WORKS -static unsigned long __init isa_floppy_init(void) -{ - struct sparc_isa_bridge *isa_br; - struct sparc_isa_device *isa_dev = NULL; - - for_each_isa(isa_br) { - for_each_isadev(isa_dev, isa_br) { - if (!strcmp(isa_dev->prom_node->name, "dma")) { - struct sparc_isa_device *child = - isa_dev->child; - - while (child) { - if (!strcmp(child->prom_node->name, - "floppy")) { - isa_dev = child; - goto isa_done; - } - child = child->next; - } - } - } - } -isa_done: - if (!isa_dev) - return 0; - - /* We could use DMA on devices behind the ISA bridge, but... - * - * There is a slight problem. Normally on x86 kit the x86 processor - * delays I/O port instructions when the ISA bus "dma in progress" - * signal is active. Well, sparc64 systems do not monitor this - * signal thus we would need to block all I/O port accesses in software - * when a dma transfer is active for some device. - */ - - sun_fdc = (struct sun_flpy_controller *)isa_dev->resource.start; - FLOPPY_IRQ = isa_dev->irq; - - sun_fdops.fd_inb = sun_pci_fd_inb; - sun_fdops.fd_outb = sun_pci_fd_outb; - - can_use_virtual_dma = use_virtual_dma = 1; - sun_fdops.fd_enable_dma = sun_fd_enable_dma; - sun_fdops.fd_disable_dma = sun_fd_disable_dma; - sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode; - sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr; - sun_fdops.fd_set_dma_count = sun_fd_set_dma_count; - sun_fdops.get_dma_residue = sun_get_dma_residue; - - sun_fdops.fd_request_irq = sun_fd_request_irq; - sun_fdops.fd_free_irq = sun_fd_free_irq; - - /* Floppy eject is manual. Actually, could determine this - * via presence of 'manual' property in OBP node. 
- */ - sun_fdops.fd_eject = sun_pci_fd_eject; - - fdc_status = (unsigned long) &sun_fdc->status_82077; - - allowed_drive_mask = 0; - sun_floppy_types[0] = 0; - sun_floppy_types[1] = 4; - - sun_pci_broken_drive = 1; - sun_fdops.fd_outb = sun_pci_fd_broken_outb; - - return sun_floppy_types[0]; -} -#endif /* ISA_FLOPPY_WORKS */ - -#endif - static unsigned long __init sun_floppy_init(void) { char state[128]; @@ -667,13 +590,8 @@ static unsigned long __init sun_floppy_init(void) } } ebus_done: - if (!edev) { -#ifdef ISA_FLOPPY_WORKS - return isa_floppy_init(); -#else + if (!edev) return 0; -#endif - } state_prop = of_get_property(edev->prom_node, "status", NULL); if (state_prop && !strncmp(state_prop, "disabled", 8)) diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h new file mode 100644 index 00000000000..412af58926a --- /dev/null +++ b/include/asm-sparc64/hugetlb.h @@ -0,0 +1,84 @@ +#ifndef _ASM_SPARC64_HUGETLB_H +#define _ASM_SPARC64_HUGETLB_H + +#include <asm/page.h> + + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + +void hugetlb_prefault_arch_hook(struct mm_struct *mm); + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) { + return 0; +} + +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. + */ +static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* _ASM_SPARC64_HUGETLB_H */ diff --git a/include/asm-sparc64/isa.h b/include/asm-sparc64/isa.h deleted file mode 100644 index ecd9290f78d..00000000000 --- a/include/asm-sparc64/isa.h +++ /dev/null @@ -1,47 +0,0 @@ -/* $Id: isa.h,v 1.1 2001/05/11 04:31:55 davem Exp $ - * isa.h: Sparc64 layer for PCI to ISA bridge devices. - * - * Copyright (C) 2001 David S. 
Miller (davem@redhat.com) - */ - -#ifndef __SPARC64_ISA_H -#define __SPARC64_ISA_H - -#include <asm/oplib.h> -#include <asm/prom.h> -#include <asm/of_device.h> - -struct sparc_isa_bridge; - -struct sparc_isa_device { - struct of_device ofdev; - struct sparc_isa_device *next; - struct sparc_isa_device *child; - struct sparc_isa_bridge *bus; - struct device_node *prom_node; - struct resource resource; - unsigned int irq; -}; -#define to_isa_device(d) container_of(d, struct sparc_isa_device, ofdev.dev) - -struct sparc_isa_bridge { - struct of_device ofdev; - struct sparc_isa_bridge *next; - struct sparc_isa_device *devices; - struct pci_dev *self; - int index; - struct device_node *prom_node; -}; -#define to_isa_bridge(d) container_of(d, struct sparc_isa_bridge, ofdev.dev) - -extern struct sparc_isa_bridge *isa_chain; - -extern void isa_init(void); - -#define for_each_isa(bus) \ - for((bus) = isa_chain; (bus); (bus) = (bus)->next) - -#define for_each_isadev(dev, bus) \ - for((dev) = (bus)->devices; (dev); (dev) = (dev)->next) - -#endif /* !(__SPARC64_ISA_H) */ diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index e93a482aa24..618117def0d 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -39,8 +39,6 @@ #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) #define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#define ARCH_HAS_SETCLEAR_HUGE_PTE -#define ARCH_HAS_HUGETLB_PREFAULT_HOOK #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #endif diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 549e45266b6..0e200e7acec 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -506,6 +506,11 @@ static inline pte_t pte_mkyoung(pte_t pte) return __pte(pte_val(pte) | mask); } +static inline pte_t pte_mkspecial(pte_t pte) +{ + return pte; +} + static inline unsigned long pte_young(pte_t pte) { unsigned long mask; @@ -608,6 +613,11 @@ static inline unsigned long pte_present(pte_t pte) return val; } +static inline int pte_special(pte_t pte) +{ + return 0; +} + #define pmd_set(pmdp, ptep) \ (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) #define pud_set(pudp, pmdp) \ diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h index b4b951d570b..714b81956f3 100644 --- a/include/asm-sparc64/ptrace.h +++ b/include/asm-sparc64/ptrace.h @@ -1,4 +1,3 @@ -/* $Id: ptrace.h,v 1.14 2002/02/09 19:49:32 davem Exp $ */ #ifndef _SPARC64_PTRACE_H #define _SPARC64_PTRACE_H @@ -8,10 +7,15 @@ * stack during a system call and basically all traps. */ +/* This magic value must have the low 9 bits clear, + * as that is where we encode the %tt value, see below. 
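 * (Editorial note, mirroring the helpers added just below: the trap entry
 * code is expected to store PT_REGS_MAGIC | %tt in regs->magic, so
 * pt_regs_trap_type() recovers %tt as regs->magic & 0x1ff and
 * pt_regs_is_syscall() then matches the syscall trap types 0x110, 0x111
 * and 0x16d.)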
+ */ #define PT_REGS_MAGIC 0x57ac6c00 #ifndef __ASSEMBLY__ +#include <linux/types.h> + struct pt_regs { unsigned long u_regs[16]; /* globals and ins */ unsigned long tstate; @@ -33,6 +37,23 @@ struct pt_regs { unsigned int magic; }; +static inline int pt_regs_trap_type(struct pt_regs *regs) +{ + return regs->magic & 0x1ff; +} + +static inline int pt_regs_clear_trap_type(struct pt_regs *regs) +{ + return regs->magic &= ~0x1ff; +} + +static inline bool pt_regs_is_syscall(struct pt_regs *regs) +{ + int tt = pt_regs_trap_type(regs); + + return (tt == 0x110 || tt == 0x111 || tt == 0x16d); +} + struct pt_regs32 { unsigned int psr; unsigned int pc; diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index 98252cd44dd..71e42d1a80d 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -1,5 +1,4 @@ -/* $Id: thread_info.h,v 1.1 2002/02/10 00:00:58 davem Exp $ - * thread_info.h: sparc64 low-level thread information +/* thread_info.h: sparc64 low-level thread information * * Copyright (C) 2002 David S. Miller (davem@redhat.com) */ @@ -223,7 +222,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_PERFCTR 4 /* performance counters active */ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ -#define TIF_NEWSIGNALS 6 /* wants new-style signals */ +/* flag bit 6 is available */ #define TIF_32BIT 7 /* 32-bit binary */ /* flag bit 8 is available */ #define TIF_SECCOMP 9 /* secure computing */ @@ -242,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_PERFCTR (1<<TIF_PERFCTR) #define _TIF_UNALIGNED (1<<TIF_UNALIGNED) -#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS) #define _TIF_32BIT (1<<TIF_32BIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) diff --git a/include/asm-sparc64/unaligned.h b/include/asm-sparc64/unaligned.h index 1ed3ba53777..edcebb09441 100644 --- a/include/asm-sparc64/unaligned.h +++ b/include/asm-sparc64/unaligned.h @@ -1,6 +1,10 @@ -#ifndef _ASM_SPARC64_UNALIGNED_H_ -#define _ASM_SPARC64_UNALIGNED_H_ +#ifndef _ASM_SPARC64_UNALIGNED_H +#define _ASM_SPARC64_UNALIGNED_H -#include <asm-generic/unaligned.h> +#include <linux/unaligned/be_struct.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be #endif /* _ASM_SPARC64_UNALIGNED_H */ diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index 4102b443e92..02db81b7b86 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -173,6 +173,11 @@ static inline int pte_newprot(pte_t pte) return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT))); } +static inline int pte_special(pte_t pte) +{ + return 0; +} + /* * ================================= * Flags setting section. 
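/*
 * Editor's aside, not part of the patch: the architectures in this diff that
 * do not implement a real "special" PTE bit all gain the same minimal stubs,
 * of which the two hunks around this note are one instance:
 *
 *	static inline int pte_special(pte_t pte)	{ return 0; }
 *	static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 *
 * Only s390, earlier in this diff, defines _PAGE_SPECIAL and advertises
 * __HAVE_ARCH_PTE_SPECIAL, so only there does pte_special() report anything.
 */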
@@ -241,6 +246,11 @@ static inline pte_t pte_mknewpage(pte_t pte) return(pte); } +static inline pte_t pte_mkspecial(pte_t pte) +{ + return(pte); +} + static inline void set_pte(pte_t *pteptr, pte_t pteval) { pte_copy(*pteptr, pteval); diff --git a/include/asm-um/unaligned.h b/include/asm-um/unaligned.h index 1d2497c5727..a47196974e3 100644 --- a/include/asm-um/unaligned.h +++ b/include/asm-um/unaligned.h @@ -1,6 +1,6 @@ -#ifndef __UM_UNALIGNED_H -#define __UM_UNALIGNED_H +#ifndef _ASM_UM_UNALIGNED_H +#define _ASM_UM_UNALIGNED_H #include "asm/arch/unaligned.h" -#endif +#endif /* _ASM_UM_UNALIGNED_H */ diff --git a/include/asm-v850/unaligned.h b/include/asm-v850/unaligned.h index e30b18653a9..53122b28491 100644 --- a/include/asm-v850/unaligned.h +++ b/include/asm-v850/unaligned.h @@ -1,6 +1,4 @@ /* - * include/asm-v850/unaligned.h -- Unaligned memory access - * * Copyright (C) 2001 NEC Corporation * Copyright (C) 2001 Miles Bader <miles@gnu.org> * @@ -8,123 +6,17 @@ * Public License. See the file COPYING in the main directory of this * archive for more details. * - * This file is a copy of the arm version, include/asm-arm/unaligned.h - * * Note that some v850 chips support unaligned access, but it seems too * annoying to use. */ +#ifndef _ASM_V850_UNALIGNED_H +#define _ASM_V850_UNALIGNED_H -#ifndef __V850_UNALIGNED_H__ -#define __V850_UNALIGNED_H__ - -#include <asm/types.h> - -extern int __bug_unaligned_x(void *ptr); - -/* - * What is the most efficient way of loading/storing an unaligned value? - * - * That is the subject of this file. Efficiency here is defined as - * minimum code size with minimum register usage for the common cases. - * It is currently not believed that long longs are common, so we - * trade efficiency for the chars, shorts and longs against the long - * longs. - * - * Current stats with gcc 2.7.2.2 for these functions: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 3 7 3 - * 8 20 6 16 6 - * - * gcc 2.95.1 seems to code differently: - * - * ptrsize get: code regs put: code regs - * 1 1 1 1 2 - * 2 3 2 3 2 - * 4 7 4 7 4 - * 8 19 8 15 6 - * - * which may or may not be more efficient (depending upon whether - * you can afford the extra registers). Hopefully the gcc 2.95 - * is inteligent enough to decide if it is better to use the - * extra register, but evidence so far seems to suggest otherwise. 
- * - * Unfortunately, gcc is not able to optimise the high word - * out of long long >> 32, or the low word from long long << 32 - */ - -#define __get_unaligned_2(__p) \ - (__p[0] | __p[1] << 8) - -#define __get_unaligned_4(__p) \ - (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) - -#define get_unaligned(ptr) \ - ({ \ - __typeof__(*(ptr)) __v; \ - __u8 *__p = (__u8 *)(ptr); \ - switch (sizeof(*(ptr))) { \ - case 1: __v = *(ptr); break; \ - case 2: __v = __get_unaligned_2(__p); break; \ - case 4: __v = __get_unaligned_4(__p); break; \ - case 8: { \ - unsigned int __v1, __v2; \ - __v2 = __get_unaligned_4((__p+4)); \ - __v1 = __get_unaligned_4(__p); \ - __v = ((unsigned long long)__v2 << 32 | __v1); \ - } \ - break; \ - default: __v = __bug_unaligned_x(__p); break; \ - } \ - __v; \ - }) - - -static inline void __put_unaligned_2(__u32 __v, register __u8 *__p) -{ - *__p++ = __v; - *__p++ = __v >> 8; -} - -static inline void __put_unaligned_4(__u32 __v, register __u8 *__p) -{ - __put_unaligned_2(__v >> 16, __p + 2); - __put_unaligned_2(__v, __p); -} - -static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p) -{ - /* - * tradeoff: 8 bytes of stack for all unaligned puts (2 - * instructions), or an extra register in the long long - * case - go for the extra register. - */ - __put_unaligned_4(__v >> 32, __p+4); - __put_unaligned_4(__v, __p); -} - -/* - * Try to store an unaligned value as efficiently as possible. - */ -#define put_unaligned(val,ptr) \ - ({ \ - switch (sizeof(*(ptr))) { \ - case 1: \ - *(ptr) = (val); \ - break; \ - case 2: __put_unaligned_2((val),(__u8 *)(ptr)); \ - break; \ - case 4: __put_unaligned_4((val),(__u8 *)(ptr)); \ - break; \ - case 8: __put_unaligned_8((val),(__u8 *)(ptr)); \ - break; \ - default: __bug_unaligned_x(ptr); \ - break; \ - } \ - (void) 0; \ - }) +#include <linux/unaligned/be_byteshift.h> +#include <linux/unaligned/le_byteshift.h> +#include <linux/unaligned/generic.h> +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le -#endif /* __V850_UNALIGNED_H__ */ +#endif /* _ASM_V850_UNALIGNED_H */ diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h index 9870cc1f2f8..7154dc4de95 100644 --- a/include/asm-x86/geode.h +++ b/include/asm-x86/geode.h @@ -30,7 +30,13 @@ extern int geode_get_dev_base(unsigned int dev); /* MSRS */ -#define GX_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLIU_P2D_RO0 0x10000029 + +#define MSR_LX_GLD_MSR_CONFIG 0x48002001 +#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data + * sheet has the wrong value */ +#define MSR_GLCP_SYS_RSTPLL 0x4C000014 +#define MSR_GLCP_DOTPLL 0x4C000015 #define MSR_LBAR_SMB 0x5140000B #define MSR_LBAR_GPIO 0x5140000C @@ -45,8 +51,14 @@ extern int geode_get_dev_base(unsigned int dev); #define MSR_PIC_ZSEL_LOW 0x51400022 #define MSR_PIC_ZSEL_HIGH 0x51400023 -#define MFGPT_IRQ_MSR 0x51400028 -#define MFGPT_NR_MSR 0x51400029 +#define MSR_MFGPT_IRQ 0x51400028 +#define MSR_MFGPT_NR 0x51400029 +#define MSR_MFGPT_SETUP 0x5140002B + +#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ + +#define MSR_GX_GLD_MSR_CONFIG 0xC0002001 +#define MSR_GX_MSR_PADSEL 0xC0002011 /* Resource Sizes */ @@ -93,6 +105,15 @@ extern int geode_get_dev_base(unsigned int dev); #define PM_AWKD 0x50 #define PM_SSC 0x54 +/* VSA2 magic values */ + +#define VSA_VRC_INDEX 0xAC1C +#define VSA_VRC_DATA 0xAC1E +#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ +#define VSA_VR_SIGNATURE 0x0003 +#define VSA_VR_MEM_SIZE 0x0200 +#define VSA_SIG 0x4132 /* signature 
is ascii 'VSA2' */ + /* GPIO */ #define GPIO_OUTPUT_VAL 0x00 @@ -164,6 +185,17 @@ static inline int is_geode(void) return (is_geode_gx() || is_geode_lx()); } +/* + * The VSA has virtual registers that we can query for a signature. + */ +static inline int geode_has_vsa2(void) +{ + outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); + outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); + + return (inw(VSA_VRC_DATA) == VSA_SIG); +} + /* MFGPTs */ #define MFGPT_MAX_TIMERS 8 diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h new file mode 100644 index 00000000000..14171a4924f --- /dev/null +++ b/include/asm-x86/hugetlb.h @@ -0,0 +1,91 @@ +#ifndef _ASM_X86_HUGETLB_H +#define _ASM_X86_HUGETLB_H + +#include <asm/page.h> + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) { + return 0; +} + +/* + * If the arch doesn't supply something else, assume that hugepage + * size aligned regions are ok without further preparation. + */ +static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) { +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb, + unsigned long addr, unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + return ptep_get_and_clear(mm, addr, ptep); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* _ASM_X86_HUGETLB_H */ diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h index 7a71120426a..80eefef2cc7 100644 --- a/include/asm-x86/kvm.h +++ b/include/asm-x86/kvm.h @@ -188,4 +188,45 @@ struct kvm_cpuid2 { struct kvm_cpuid_entry2 entries[0]; }; +/* for KVM_GET_PIT and KVM_SET_PIT */ +struct kvm_pit_channel_state { + __u32 count; /* can be 65536 */ + __u16 latched_count; + __u8 count_latched; + __u8 status_latched; + __u8 status; + __u8 read_state; + __u8 write_state; + __u8 write_latch; + __u8 rw_mode; + __u8 mode; + __u8 bcd; + __u8 gate; + __s64 count_load_time; +}; + +struct kvm_pit_state { + struct kvm_pit_channel_state channels[3]; +}; + +#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) +#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) +#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) +#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) +#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) +#define 
KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) +#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) +#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) +#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) +#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) +#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) +#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) +#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) +#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) +#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) +#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) +#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) +#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) +#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) + #endif diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 68ee390b284..9d963cd6533 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -20,6 +20,13 @@ #include <asm/desc.h> +#define KVM_MAX_VCPUS 16 +#define KVM_MEMORY_SLOTS 32 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 4 + +#define KVM_PIO_PAGE_OFFSET 1 + #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1) #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD)) #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \ @@ -39,6 +46,13 @@ #define INVALID_PAGE (~(hpa_t)0) #define UNMAPPED_GVA (~(gpa_t)0) +/* shadow tables are PAE even on non-PAE hosts */ +#define KVM_HPAGE_SHIFT 21 +#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT) +#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1)) + +#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE) + #define DE_VECTOR 0 #define UD_VECTOR 6 #define NM_VECTOR 7 @@ -48,6 +62,7 @@ #define SS_VECTOR 12 #define GP_VECTOR 13 #define PF_VECTOR 14 +#define MC_VECTOR 18 #define SELECTOR_TI_MASK (1 << 2) #define SELECTOR_RPL_MASK 0x03 @@ -58,7 +73,8 @@ #define KVM_PERMILLE_MMU_PAGES 20 #define KVM_MIN_ALLOC_MMU_PAGES 64 -#define KVM_NUM_MMU_PAGES 1024 +#define KVM_MMU_HASH_SHIFT 10 +#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 #define KVM_MAX_CPUID_ENTRIES 40 @@ -106,6 +122,12 @@ enum { #define KVM_NR_MEM_OBJS 40 +struct kvm_guest_debug { + int enabled; + unsigned long bp[4]; + int singlestep; +}; + /* * We don't want allocation failures within the mmu code, so we preallocate * enough memory for a single page fault in a cache. 
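/*
 * Editor's sketch, not in the original: with the usual x86 4 KiB base page
 * (PAGE_SHIFT == 12), the constants introduced above work out to
 *
 *	KVM_HPAGE_SIZE      = 1UL << 21                  = 2 MiB
 *	KVM_PAGES_PER_HPAGE = KVM_HPAGE_SIZE / PAGE_SIZE = 512
 *	KVM_NUM_MMU_PAGES   = 1 << KVM_MMU_HASH_SHIFT    = 1024
 *
 * so the hash-based definition keeps the previous value of 1024 MMU pages
 * while making the shift explicit.
 */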
@@ -140,6 +162,7 @@ union kvm_mmu_page_role { unsigned pad_for_nice_hex_output:6; unsigned metaphysical:1; unsigned access:3; + unsigned invalid:1; }; }; @@ -204,11 +227,6 @@ struct kvm_vcpu_arch { u64 shadow_efer; u64 apic_base; struct kvm_lapic *apic; /* kernel irqchip context */ -#define VCPU_MP_STATE_RUNNABLE 0 -#define VCPU_MP_STATE_UNINITIALIZED 1 -#define VCPU_MP_STATE_INIT_RECEIVED 2 -#define VCPU_MP_STATE_SIPI_RECEIVED 3 -#define VCPU_MP_STATE_HALTED 4 int mp_state; int sipi_vector; u64 ia32_misc_enable_msr; @@ -226,8 +244,9 @@ struct kvm_vcpu_arch { u64 *last_pte_updated; struct { - gfn_t gfn; /* presumed gfn during guest pte update */ - struct page *page; /* page corresponding to that gfn */ + gfn_t gfn; /* presumed gfn during guest pte update */ + pfn_t pfn; /* pfn corresponding to that gfn */ + int largepage; } update_pte; struct i387_fxsave_struct host_fx_image; @@ -261,6 +280,11 @@ struct kvm_vcpu_arch { /* emulate context */ struct x86_emulate_ctxt emulate_ctxt; + + gpa_t time; + struct kvm_vcpu_time_info hv_clock; + unsigned int time_offset; + struct page *time_page; }; struct kvm_mem_alias { @@ -283,10 +307,13 @@ struct kvm_arch{ struct list_head active_mmu_pages; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; + struct kvm_pit *vpit; int round_robin_prev_vcpu; unsigned int tss_addr; struct page *apic_access_page; + + gpa_t wall_clock; }; struct kvm_vm_stat { @@ -298,6 +325,7 @@ struct kvm_vm_stat { u32 mmu_recycled; u32 mmu_cache_miss; u32 remote_tlb_flush; + u32 lpages; }; struct kvm_vcpu_stat { @@ -320,6 +348,7 @@ struct kvm_vcpu_stat { u32 fpu_reload; u32 insn_emulation; u32 insn_emulation_fail; + u32 hypercalls; }; struct descriptor_table { @@ -355,6 +384,7 @@ struct kvm_x86_ops { u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); void (*get_segment)(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); + int (*get_cpl)(struct kvm_vcpu *vcpu); void (*set_segment)(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); @@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm); unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); +int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); + +int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, + const void *val, int bytes); +int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, + gpa_t addr, unsigned long *ret); + +extern bool tdp_enabled; + enum emulation_result { EMULATE_DONE, /* no further processing */ EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ @@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw, unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr); void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value, unsigned long *rflags); +void kvm_enable_efer_bits(u64); int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); @@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value); -void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); -void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0); -void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0); -void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0); -unsigned long get_cr8(struct kvm_vcpu *vcpu); -void lmsw(struct kvm_vcpu *vcpu, unsigned long msw); +int 
kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason); + +void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); +void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); +void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); +unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); +void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); @@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); +void kvm_enable_tdp(void); + int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int complete_pio(struct kvm_vcpu *vcpu); @@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" +#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" #define MSR_IA32_TIME_STAMP_COUNTER 0x010 @@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) #define RMODE_TSS_SIZE \ (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) +enum { + TASK_SWITCH_CALL = 0, + TASK_SWITCH_IRET = 1, + TASK_SWITCH_JMP = 2, + TASK_SWITCH_GATE = 3, +}; + +#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 5, d1, d2, d3, d4, d5) +#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 4, d1, d2, d3, d4, 0) +#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 3, d1, d2, d3, 0, 0) +#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 2, d1, d2, 0, 0, 0) +#define KVMTRACE_1D(evt, vcpu, d1, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 1, d1, 0, 0, 0, 0) +#define KVMTRACE_0D(evt, vcpu, name) \ + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ + vcpu, 0, 0, 0, 0, 0, 0) + #endif diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h index c6f3fd8d8c5..50984594207 100644 --- a/include/asm-x86/kvm_para.h +++ b/include/asm-x86/kvm_para.h @@ -10,10 +10,65 @@ * paravirtualization, the appropriate feature bit should be checked. */ #define KVM_CPUID_FEATURES 0x40000001 +#define KVM_FEATURE_CLOCKSOURCE 0 +#define KVM_FEATURE_NOP_IO_DELAY 1 +#define KVM_FEATURE_MMU_OP 2 + +#define MSR_KVM_WALL_CLOCK 0x11 +#define MSR_KVM_SYSTEM_TIME 0x12 + +#define KVM_MAX_MMU_OP_BATCH 32 + +/* Operations for KVM_HC_MMU_OP */ +#define KVM_MMU_OP_WRITE_PTE 1 +#define KVM_MMU_OP_FLUSH_TLB 2 +#define KVM_MMU_OP_RELEASE_PT 3 + +/* Payload for KVM_HC_MMU_OP */ +struct kvm_mmu_op_header { + __u32 op; + __u32 pad; +}; + +struct kvm_mmu_op_write_pte { + struct kvm_mmu_op_header header; + __u64 pte_phys; + __u64 pte_val; +}; + +struct kvm_mmu_op_flush_tlb { + struct kvm_mmu_op_header header; +}; + +struct kvm_mmu_op_release_pt { + struct kvm_mmu_op_header header; + __u64 pt_phys; +}; #ifdef __KERNEL__ #include <asm/processor.h> +/* xen binary-compatible interface. 
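 * (Editorial aside drawn from the Xen ABI that this mirrors, not from the
 * patch itself: guests typically scale "rdtsc() - tsc_timestamp" by
 * tsc_shift, multiply by tsc_to_system_mul and keep the upper 32 bits to get
 * nanoseconds past system_time, re-reading the block whenever the version
 * field is odd or changes during the read.)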
See xen headers for details */ +struct kvm_vcpu_time_info { + uint32_t version; + uint32_t pad0; + uint64_t tsc_timestamp; + uint64_t system_time; + uint32_t tsc_to_system_mul; + int8_t tsc_shift; + int8_t pad[3]; +} __attribute__((__packed__)); /* 32 bytes */ + +struct kvm_wall_clock { + uint32_t wc_version; + uint32_t wc_sec; + uint32_t wc_nsec; +} __attribute__((__packed__)); + + +extern void kvmclock_init(void); + + /* This instruction is vmcall. On non-VT architectures, it will generate a * trap that we will then rewrite to the appropriate instruction. */ diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h new file mode 100644 index 00000000000..97d47133486 --- /dev/null +++ b/include/asm-x86/olpc.h @@ -0,0 +1,132 @@ +/* OLPC machine specific definitions */ + +#ifndef ASM_OLPC_H_ +#define ASM_OLPC_H_ + +#include <asm/geode.h> + +struct olpc_platform_t { + int flags; + uint32_t boardrev; + int ecver; +}; + +#define OLPC_F_PRESENT 0x01 +#define OLPC_F_DCON 0x02 +#define OLPC_F_VSA 0x04 + +#ifdef CONFIG_OLPC + +extern struct olpc_platform_t olpc_platform_info; + +/* + * OLPC board IDs contain the major build number within the mask 0x0ff0, + * and the minor build number withing 0x000f. Pre-builds have a minor + * number less than 8, and normal builds start at 8. For example, 0x0B10 + * is a PreB1, and 0x0C18 is a C1. + */ + +static inline uint32_t olpc_board(uint8_t id) +{ + return (id << 4) | 0x8; +} + +static inline uint32_t olpc_board_pre(uint8_t id) +{ + return id << 4; +} + +static inline int machine_is_olpc(void) +{ + return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0; +} + +/* + * The DCON is OLPC's Display Controller. It has a number of unique + * features that we might want to take advantage of.. + */ +static inline int olpc_has_dcon(void) +{ + return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0; +} + +/* + * The VSA is software from AMD that typical Geode bioses will include. + * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does + * not include the VSA; instead, PCI is emulated by the kernel. + * + * The VSA is described further in arch/x86/pci/olpc.c. + */ +static inline int olpc_has_vsa(void) +{ + return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0; +} + +/* + * The "Mass Production" version of OLPC's XO is identified as being model + * C2. During the prototype phase, the following models (in chronological + * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models + * were based on Geode GX CPUs, and models after that were based upon + * Geode LX CPUs. There were also some hand-assembled models floating + * around, referred to as PreB1, PreB2, etc. 
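 * (Editorial example using the helpers above: olpc_board(0xC1) evaluates to
 * 0x0C18, a normal C1 build, while olpc_board_pre(0xB1) evaluates to 0x0B10,
 * a PreB1; olpc_board_at_least() below simply compares boardrev against such
 * values.)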
+ */ +static inline int olpc_board_at_least(uint32_t rev) +{ + return olpc_platform_info.boardrev >= rev; +} + +#else + +static inline int machine_is_olpc(void) +{ + return 0; +} + +static inline int olpc_has_dcon(void) +{ + return 0; +} + +static inline int olpc_has_vsa(void) +{ + return 0; +} + +#endif + +/* EC related functions */ + +extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, + unsigned char *outbuf, size_t outlen); + +extern int olpc_ec_mask_set(uint8_t bits); +extern int olpc_ec_mask_unset(uint8_t bits); + +/* EC commands */ + +#define EC_FIRMWARE_REV 0x08 + +/* SCI source values */ + +#define EC_SCI_SRC_EMPTY 0x00 +#define EC_SCI_SRC_GAME 0x01 +#define EC_SCI_SRC_BATTERY 0x02 +#define EC_SCI_SRC_BATSOC 0x04 +#define EC_SCI_SRC_BATERR 0x08 +#define EC_SCI_SRC_EBOOK 0x10 +#define EC_SCI_SRC_WLAN 0x20 +#define EC_SCI_SRC_ACPWR 0x40 +#define EC_SCI_SRC_ALL 0x7F + +/* GPIO assignments */ + +#define OLPC_GPIO_MIC_AC geode_gpio(1) +#define OLPC_GPIO_DCON_IRQ geode_gpio(7) +#define OLPC_GPIO_THRM_ALRM geode_gpio(10) +#define OLPC_GPIO_SMB_CLK geode_gpio(14) +#define OLPC_GPIO_SMB_DATA geode_gpio(15) +#define OLPC_GPIO_WORKAUX geode_gpio(24) +#define OLPC_GPIO_LID geode_gpio(26) +#define OLPC_GPIO_ECSCI geode_gpio(27) + +#endif diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index a496d6335d3..801b31f7145 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -195,6 +195,11 @@ static inline int pte_exec(pte_t pte) return !(pte_val(pte) & _PAGE_NX); } +static inline int pte_special(pte_t pte) +{ + return 0; +} + static inline int pmd_large(pmd_t pte) { return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) == @@ -256,6 +261,11 @@ static inline pte_t pte_clrglobal(pte_t pte) return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); } +static inline pte_t pte_mkspecial(pte_t pte) +{ + return pte; +} + extern pteval_t __supported_pte_mask; static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 117343b0c27..2e7974ec77e 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h @@ -722,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) static inline void __sti_mwait(unsigned long eax, unsigned long ecx) { + trace_hardirqs_on(); /* "mwait %eax, %ecx;" */ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" :: "a" (eax), "c" (ecx)); diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h index 6b5233b4f84..e63741f1939 100644 --- a/include/asm-x86/reboot.h +++ b/include/asm-x86/reboot.h @@ -15,5 +15,7 @@ struct machine_ops { extern struct machine_ops machine_ops; void machine_real_restart(unsigned char *code, int length); +void native_machine_crash_shutdown(struct pt_regs *regs); +void native_machine_shutdown(void); #endif /* _ASM_REBOOT_H */ diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h index 68779b048a3..bce72d7a958 100644 --- a/include/asm-x86/time.h +++ b/include/asm-x86/time.h @@ -1,7 +1,6 @@ #ifndef _ASMX86_TIME_H #define _ASMX86_TIME_H -extern void (*late_time_init)(void); extern void hpet_time_init(void); #include <asm/mc146818rtc.h> diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index d270ffe7275..a7bd416b476 100644 --- a/include/asm-x86/unaligned.h +++ b/include/asm-x86/unaligned.h @@ -3,35 +3,12 @@ /* * The x86 can do unaligned accesses itself. 
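 * (Editorial usage note: the intent of this conversion is that callers are
 * unchanged; e.g. "u16 v = get_unaligned((u16 *)p);" and
 * "put_unaligned(v, (u16 *)p);" keep working on arbitrarily aligned pointers,
 * now routed through the common little-endian access_ok helpers included
 * below.)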
- * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. */ -/** - * get_unaligned - get value from possibly mis-aligned location - * @ptr: pointer to value - * - * This macro should be used for accessing values larger in size than - * single bytes at locations that are expected to be improperly aligned, - * e.g. retrieving a u16 value from a location not u16-aligned. - * - * Note that unaligned accesses can be very expensive on some architectures. - */ -#define get_unaligned(ptr) (*(ptr)) +#include <linux/unaligned/access_ok.h> +#include <linux/unaligned/generic.h> -/** - * put_unaligned - put value to a possibly mis-aligned location - * @val: value to place - * @ptr: pointer to location - * - * This macro should be used for placing values larger in size than - * single bytes at locations that are expected to be improperly aligned, - * e.g. writing a u16 value to a location not u16-aligned. - * - * Note that unaligned accesses can be very expensive on some architectures. - */ -#define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le #endif /* _ASM_X86_UNALIGNED_H */ diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h index c8b024a48b4..8014d96b21f 100644 --- a/include/asm-xtensa/pgtable.h +++ b/include/asm-xtensa/pgtable.h @@ -210,6 +210,8 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; } static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } +static inline int pte_special(pte_t pte) { return 0; } + static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; } static inline pte_t pte_mkclean(pte_t pte) @@ -222,6 +224,8 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITABLE; return pte; } +static inline pte_t pte_mkspecial(pte_t pte) + { return pte; } /* * Conversion functions: convert a page and protection to a page entry, diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h index 28220890d0a..8f3424fc5d1 100644 --- a/include/asm-xtensa/unaligned.h +++ b/include/asm-xtensa/unaligned.h @@ -1,6 +1,4 @@ /* - * include/asm-xtensa/unaligned.h - * * Xtensa doesn't handle unaligned accesses efficiently. * * This file is subject to the terms and conditions of the GNU General Public @@ -9,20 +7,23 @@ * * Copyright (C) 2001 - 2005 Tensilica Inc. */ +#ifndef _ASM_XTENSA_UNALIGNED_H +#define _ASM_XTENSA_UNALIGNED_H -#ifndef _XTENSA_UNALIGNED_H -#define _XTENSA_UNALIGNED_H - -#include <linux/string.h> - -/* Use memmove here, so gcc does not insert a __builtin_memcpy. 
*/ - -#define get_unaligned(ptr) \ - ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) - -#define put_unaligned(val, ptr) \ - ({ __typeof__(*(ptr)) __tmp = (val); \ - memmove((ptr), &__tmp, sizeof(*(ptr))); \ - (void)0; }) +#ifdef __XTENSA_EL__ +# include <linux/unaligned/le_memmove.h> +# include <linux/unaligned/be_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__XTENSA_EB__) +# include <linux/unaligned/be_memmove.h> +# include <linux/unaligned/le_byteshift.h> +# include <linux/unaligned/generic.h> +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error processor byte order undefined! +#endif -#endif /* _XTENSA_UNALIGNED_H */ +#endif /* _ASM_XTENSA_UNALIGNED_H */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index bda6f04791d..78fade0a1e3 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -20,6 +20,7 @@ header-y += affs_hardblocks.h header-y += aio_abi.h header-y += arcfb.h header-y += atmapi.h +header-y += atmarp.h header-y += atmbr2684.h header-y += atmclip.h header-y += atm_eni.h @@ -48,6 +49,7 @@ header-y += coff.h header-y += comstats.h header-y += const.h header-y += cgroupstats.h +header-y += cramfs_fs.h header-y += cycx_cfm.h header-y += dlmconstants.h header-y += dlm_device.h @@ -70,10 +72,12 @@ header-y += firewire-constants.h header-y += fuse.h header-y += genetlink.h header-y += gen_stats.h +header-y += gfs2_ondisk.h header-y += gigaset_dev.h header-y += hysdn_if.h header-y += i2o-dev.h header-y += i8k.h +header-y += if_addrlabel.h header-y += if_arcnet.h header-y += if_bonding.h header-y += if_cablemodem.h @@ -91,6 +95,7 @@ header-y += if_tunnel.h header-y += in6.h header-y += in_route.h header-y += ioctl.h +header-y += ip6_tunnel.h header-y += ipmi_msgdefs.h header-y += ipsec.h header-y += ipx.h @@ -117,7 +122,6 @@ header-y += nfs2.h header-y += nfs4_mount.h header-y += nfs_mount.h header-y += nl80211.h -header-y += oom.h header-y += param.h header-y += pci_regs.h header-y += pfkeyv2.h @@ -166,7 +170,6 @@ unifdef-y += adfs_fs.h unifdef-y += agpgart.h unifdef-y += apm_bios.h unifdef-y += atalk.h -unifdef-y += atmarp.h unifdef-y += atmdev.h unifdef-y += atm.h unifdef-y += atm_tcp.h @@ -182,7 +185,6 @@ unifdef-y += cm4000_cs.h unifdef-y += cn_proc.h unifdef-y += coda.h unifdef-y += connector.h -unifdef-y += cramfs_fs.h unifdef-y += cuda.h unifdef-y += cyclades.h unifdef-y += dccp.h @@ -205,7 +207,6 @@ unifdef-y += futex.h unifdef-y += fs.h unifdef-y += gameport.h unifdef-y += generic_serial.h -unifdef-y += gfs2_ondisk.h unifdef-y += hayesesp.h unifdef-y += hdlcdrv.h unifdef-y += hdlc.h @@ -219,7 +220,6 @@ unifdef-y += i2c-dev.h unifdef-y += icmp.h unifdef-y += icmpv6.h unifdef-y += if_addr.h -unifdef-y += if_addrlabel.h unifdef-y += if_arp.h unifdef-y += if_bridge.h unifdef-y += if_ec.h @@ -243,7 +243,6 @@ unifdef-y += ipc.h unifdef-y += ipmi.h unifdef-y += ipv6.h unifdef-y += ipv6_route.h -unifdef-y += ip6_tunnel.h unifdef-y += isdn.h unifdef-y += isdnif.h unifdef-y += isdn_divertif.h diff --git a/include/linux/aio.h b/include/linux/aio.h index 0d0b7f629bd..b51ddd28444 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -209,27 +209,8 @@ extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); extern int aio_put_req(struct kiocb *iocb); extern void kick_iocb(struct kiocb *iocb); extern int aio_complete(struct kiocb *iocb, long res, long res2); -extern void 
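The per-arch unaligned.h files above now just select among the shared linux/unaligned/ helpers; callers are unchanged and still go through <asm/unaligned.h>. A small sketch of the usual pattern (the example_ helpers are illustrative only):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Read and write a 32-bit field at a possibly misaligned address. */
static u32 example_get_len(const void *buf)
{
	return get_unaligned((const u32 *)buf);
}

static void example_put_len(void *buf, u32 len)
{
	put_unaligned(len, (u32 *)buf);
}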
__put_ioctx(struct kioctx *ctx); struct mm_struct; extern void exit_aio(struct mm_struct *mm); -extern struct kioctx *lookup_ioctx(unsigned long ctx_id); -extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, - struct iocb *iocb); - -/* semi private, but used by the 32bit emulations: */ -struct kioctx *lookup_ioctx(unsigned long ctx_id); -int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, - struct iocb *iocb); - -#define get_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - atomic_inc(&(kioctx)->users); \ -} while (0) -#define put_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ - __put_ioctx(kioctx); \ -} while (0) #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 48a62baace5..b66fa2bdfd9 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -156,9 +156,7 @@ static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi) extern struct backing_dev_info default_backing_dev_info; void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); -int writeback_acquire(struct backing_dev_info *bdi); int writeback_in_progress(struct backing_dev_info *bdi); -void writeback_release(struct backing_dev_info *bdi); static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) { diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b7fc55ec8d4..b512e48f6d8 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -34,7 +34,8 @@ struct linux_binprm{ #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ - int sh_bang; + unsigned int sh_bang:1, + misc_bang:1; struct file * file; int e_uid, e_gid; kernel_cap_t cap_inheritable, cap_permitted; @@ -48,7 +49,6 @@ struct linux_binprm{ unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; - unsigned long argv_len; }; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 diff --git a/include/linux/bio.h b/include/linux/bio.h index d259690863f..61c15eaf3fb 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -324,6 +324,8 @@ extern struct bio *bio_map_user_iov(struct request_queue *, extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, gfp_t); +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, + gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 1dbe074f1c6..43b406def35 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -46,6 +46,8 @@ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) + * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap + * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf @@ -121,6 +123,10 @@ extern void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned 
long *old, const unsigned long *new, int bits); extern int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); +extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, + const unsigned long *relmap, int bits); +extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, + int sz, int bits); extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 48bde600a2d..024f2b02724 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -6,8 +6,8 @@ #define BIT(nr) (1UL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) -#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) #define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) #endif /* @@ -114,8 +114,6 @@ static inline unsigned fls_long(unsigned long l) #ifdef __KERNEL__ #ifdef CONFIG_GENERIC_FIND_FIRST_BIT -extern unsigned long __find_first_bit(const unsigned long *addr, - unsigned long size); /** * find_first_bit - find the first set bit in a memory region @@ -124,28 +122,8 @@ extern unsigned long __find_first_bit(const unsigned long *addr, * * Returns the bit number of the first set bit. */ -static __always_inline unsigned long -find_first_bit(const unsigned long *addr, unsigned long size) -{ - /* Avoid a function call if the bitmap size is a constant */ - /* and not bigger than BITS_PER_LONG. */ - - /* insert a sentinel so that __ffs returns size if there */ - /* are no set bits in the bitmap */ - if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) - return __ffs((*addr) | (1ul << size)); - - /* the result of __ffs(0) is undefined, so it needs to be */ - /* handled separately */ - if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) - return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr); - - /* size is not constant or too big */ - return __find_first_bit(addr, size); -} - -extern unsigned long __find_first_zero_bit(const unsigned long *addr, - unsigned long size); +extern unsigned long find_first_bit(const unsigned long *addr, + unsigned long size); /** * find_first_zero_bit - find the first cleared bit in a memory region @@ -154,31 +132,12 @@ extern unsigned long __find_first_zero_bit(const unsigned long *addr, * * Returns the bit number of the first cleared bit. */ -static __always_inline unsigned long -find_first_zero_bit(const unsigned long *addr, unsigned long size) -{ - /* Avoid a function call if the bitmap size is a constant */ - /* and not bigger than BITS_PER_LONG. */ - - /* insert a sentinel so that __ffs returns size if there */ - /* are no set bits in the bitmap */ - if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { - return __ffs(~(*addr) | (1ul << size)); - } - - /* the result of __ffs(0) is undefined, so it needs to be */ - /* handled separately */ - if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) - return (~(*addr) == 0) ? 
BITS_PER_LONG : __ffs(~(*addr)); - - /* size is not constant or too big */ - return __find_first_zero_bit(addr, size); -} +extern unsigned long find_first_zero_bit(const unsigned long *addr, + unsigned long size); + #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef CONFIG_GENERIC_FIND_NEXT_BIT -extern unsigned long __find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); /** * find_next_bit - find the next set bit in a memory region @@ -186,36 +145,8 @@ extern unsigned long __find_next_bit(const unsigned long *addr, * @offset: The bitnumber to start searching at * @size: The bitmap size in bits */ -static __always_inline unsigned long -find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - unsigned long value; - - /* Avoid a function call if the bitmap size is a constant */ - /* and not bigger than BITS_PER_LONG. */ - - /* insert a sentinel so that __ffs returns size if there */ - /* are no set bits in the bitmap */ - if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { - value = (*addr) & ((~0ul) << offset); - value |= (1ul << size); - return __ffs(value); - } - - /* the result of __ffs(0) is undefined, so it needs to be */ - /* handled separately */ - if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { - value = (*addr) & ((~0ul) << offset); - return (value == 0) ? BITS_PER_LONG : __ffs(value); - } - - /* size is not constant or too big */ - return __find_next_bit(addr, size, offset); -} - -extern unsigned long __find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); +extern unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); /** * find_next_zero_bit - find the next cleared bit in a memory region @@ -223,33 +154,11 @@ extern unsigned long __find_next_zero_bit(const unsigned long *addr, * @offset: The bitnumber to start searching at * @size: The bitmap size in bits */ -static __always_inline unsigned long -find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) -{ - unsigned long value; - - /* Avoid a function call if the bitmap size is a constant */ - /* and not bigger than BITS_PER_LONG. */ - - /* insert a sentinel so that __ffs returns size if there */ - /* are no set bits in the bitmap */ - if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { - value = (~(*addr)) & ((~0ul) << offset); - value |= (1ul << size); - return __ffs(value); - } - - /* the result of __ffs(0) is undefined, so it needs to be */ - /* handled separately */ - if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { - value = (~(*addr)) & ((~0ul) << offset); - return (value == 0) ? 
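find_first_bit() and find_next_bit() revert to out-of-line library functions here, so existing callers keep the familiar iteration pattern. A sketch walking every set bit of a small bitmap (size and names are illustrative):

#include <linux/bitops.h>
#include <linux/kernel.h>

#define EXAMPLE_NBITS	64

static void example_walk_bits(const unsigned long *map)
{
	unsigned long bit;

	for (bit = find_first_bit(map, EXAMPLE_NBITS);
	     bit < EXAMPLE_NBITS;
	     bit = find_next_bit(map, EXAMPLE_NBITS, bit + 1))
		printk(KERN_DEBUG "bit %lu set\n", bit);
}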
BITS_PER_LONG : __ffs(value); - } - - /* size is not constant or too big */ - return __find_next_zero_bit(addr, size, offset); -} + +extern unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset); + #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ #endif /* __KERNEL__ */ #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c5065e3d2ca..c09696a90d6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -215,8 +215,9 @@ struct request { /* * when request is used as a packet command carrier */ - unsigned int cmd_len; - unsigned char cmd[BLK_MAX_CDB]; + unsigned short cmd_len; + unsigned char __cmd[BLK_MAX_CDB]; + unsigned char *cmd; unsigned int data_len; unsigned int extra_len; /* length of alignment and padding */ @@ -407,6 +408,31 @@ struct request_queue #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ +#define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ + +static inline void queue_flag_set_unlocked(unsigned int flag, + struct request_queue *q) +{ + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_set(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); + __set_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear_unlocked(unsigned int flag, + struct request_queue *q) +{ + __clear_bit(flag, &q->queue_flags); +} + +static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) +{ + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); + __clear_bit(flag, &q->queue_flags); +} enum { /* @@ -451,6 +477,7 @@ enum { #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) @@ -496,17 +523,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw) static inline void blk_set_queue_full(struct request_queue *q, int rw) { if (rw == READ) - set_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_READFULL, q); else - set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_set(QUEUE_FLAG_WRITEFULL, q); } static inline void blk_clear_queue_full(struct request_queue *q, int rw) { if (rw == READ) - clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_READFULL, q); else - clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); } @@ -583,6 +610,7 @@ extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern void register_disk(struct gendisk *dev); extern void generic_make_request(struct bio *bio); +extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern void blk_end_sync_rq(struct request *rq, int error); @@ -626,6 +654,7 @@ extern void blk_start_queue(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); +extern void 
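The new queue_flag_set()/queue_flag_clear() helpers expect q->queue_lock to be held (hence the WARN_ON_ONCE), while the _unlocked variants do not. A sketch of toggling the new QUEUE_FLAG_NOMERGES bit under the lock (the function name is illustrative):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static void example_set_nomerges(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);	/* lock held, as required */
	spin_unlock_irqrestore(q->queue_lock, flags);
}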
__blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *); extern void blk_start_queueing(struct request_queue *); extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long); diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 4e4e340592f..6a5dbdc8a7d 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -101,6 +101,8 @@ extern void reserve_bootmem_node(pg_data_t *pgdat, extern void free_bootmem_node(pg_data_t *pgdat, unsigned long addr, unsigned long size); +extern void *alloc_bootmem_section(unsigned long size, + unsigned long section_nr); #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem_node(pgdat, x) \ diff --git a/include/linux/bsg.h b/include/linux/bsg.h index e8406c55c6d..cf0303a6061 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -56,19 +56,25 @@ struct sg_io_v4 { #if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device { struct device *class_dev; - struct device *dev; + struct device *parent; int minor; struct request_queue *queue; + struct kref ref; + void (*release)(struct device *); }; -extern int bsg_register_queue(struct request_queue *, struct device *, const char *); +extern int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)); extern void bsg_unregister_queue(struct request_queue *); #else -static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name) +static inline int bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + void (*release)(struct device *)) { return 0; } -static inline void bsg_unregister_queue(struct request_queue *rq) +static inline void bsg_unregister_queue(struct request_queue *q) { } #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 932eb02a275..82aa36c53ea 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -225,7 +225,6 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, get_block_t get_block); void block_sync_page(struct page *); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); -int generic_commit_write(struct file *, struct page *, unsigned, unsigned); int block_truncate_page(struct address_space *, loff_t, get_block_t *); int file_fsync(struct file *, struct dentry *, int); int nobh_write_begin(struct file *, struct address_space *, diff --git a/include/linux/cache.h b/include/linux/cache.h index 4552504c022..97e24881c4c 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -60,4 +60,8 @@ #endif #endif +#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE +#define cache_line_size() L1_CACHE_BYTES +#endif + #endif /* __LINUX_CACHE_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 7d50ff6d269..eaab759b146 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -155,6 +155,7 @@ typedef struct kernel_cap_struct { * Add any capability from current's capability bounding set * to the current process' inheritable set * Allow taking bits out of capability bounding set + * Allow modification of the securebits for a process */ #define CAP_SETPCAP 8 @@ -490,8 +491,6 @@ extern const kernel_cap_t __cap_init_eff_set; int capable(int cap); int __capable(struct task_struct *t, int cap); -extern long cap_prctl_drop(unsigned long cap); - #endif /* __KERNEL__ */ #endif /* !_LINUX_CAPABILITY_H */ diff --git a/include/linux/cgroup.h 
b/include/linux/cgroup.h index a6a6035a4e1..e155aa78d85 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -88,6 +88,17 @@ static inline void css_put(struct cgroup_subsys_state *css) __css_put(css); } +/* bits in struct cgroup flags field */ +enum { + /* Control Group is dead */ + CGRP_REMOVED, + /* Control Group has previously had a child cgroup or a task, + * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ + CGRP_RELEASABLE, + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, +}; + struct cgroup { unsigned long flags; /* "unsigned long" so bitops work */ @@ -139,10 +150,10 @@ struct css_set { struct kref ref; /* - * List running through all cgroup groups. Protected by - * css_set_lock + * List running through all cgroup groups in the same hash + * slot. Protected by css_set_lock */ - struct list_head list; + struct hlist_node hlist; /* * List running through all tasks using this cgroup @@ -163,7 +174,16 @@ struct css_set { * during subsystem registration (at boot time). */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; +}; + +/* + * cgroup_map_cb is an abstract callback API for reporting map-valued + * control files + */ +struct cgroup_map_cb { + int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value); + void *state; }; /* struct cftype: @@ -190,20 +210,51 @@ struct cftype { struct file *file, char __user *buf, size_t nbytes, loff_t *ppos); /* - * read_uint() is a shortcut for the common case of returning a + * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() */ - u64 (*read_uint) (struct cgroup *cgrp, struct cftype *cft); + u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft); + /* + * read_s64() is a signed version of read_u64() + */ + s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft); + /* + * read_map() is used for defining a map of key/value + * pairs. It should call cb->fill(cb, key, value) for each + * entry. The key/value pairs (and their ordering) should not + * change between reboots. + */ + int (*read_map) (struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb); + /* + * read_seq_string() is used for outputting a simple sequence + * using seqfile. + */ + int (*read_seq_string) (struct cgroup *cont, struct cftype *cft, + struct seq_file *m); + ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos); /* - * write_uint() is a shortcut for the common case of accepting + * write_u64() is a shortcut for the common case of accepting * a single integer (as parsed by simple_strtoull) from * userspace. Use in place of write(); return 0 or error. */ - int (*write_uint) (struct cgroup *cgrp, struct cftype *cft, u64 val); + int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val); + /* + * write_s64() is a signed version of write_u64() + */ + int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val); + + /* + * trigger() callback can be used to get some kick from the + * userspace, when the actual string written is not important + * at all. The private field can be used to determine the + * kick type for multiplexing. 
+ */ + int (*trigger)(struct cgroup *cgrp, unsigned int event); int (*release) (struct inode *inode, struct file *file); }; @@ -254,6 +305,12 @@ struct cgroup_subsys { struct cgroup *cgrp); void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*bind)(struct cgroup_subsys *ss, struct cgroup *root); + /* + * This routine is called with the task_lock of mm->owner held + */ + void (*mm_owner_changed)(struct cgroup_subsys *ss, + struct cgroup *old, + struct cgroup *new); int subsys_id; int active; int disabled; @@ -339,4 +396,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats, #endif /* !CONFIG_CGROUPS */ +#ifdef CONFIG_MM_OWNER +extern void +cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new); +#else /* !CONFIG_MM_OWNER */ +static inline void +cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) +{ +} +#endif /* CONFIG_MM_OWNER */ #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1ddebfc5256..e2877454ec8 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -42,3 +42,9 @@ SUBSYS(mem_cgroup) #endif /* */ + +#ifdef CONFIG_CGROUP_DEVICE +SUBSYS(devices) +#endif + +/* */ diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index 1c47a34aa79..31b75311e2c 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h @@ -43,9 +43,6 @@ int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); int coda_setattr(struct dentry *, struct iattr *); /* this file: heloers */ -static __inline__ struct CodaFid *coda_i2f(struct inode *); -static __inline__ char *coda_i2s(struct inode *); -static __inline__ void coda_flag_inode(struct inode *, int flag); char *coda_f2s(struct CodaFid *f); int coda_isroot(struct inode *i); int coda_iscontrol(const char *name, size_t length); diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index d71f7c0f931..b03f80a078b 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -53,6 +53,7 @@ struct vc_data { unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ struct console_font vc_font; /* Current VC font set */ unsigned short vc_video_erase_char; /* Background erase character */ + unsigned short vc_scrl_erase_char; /* Erase character for scroll */ /* VT terminal data */ unsigned int vc_state; /* Escape sequence parser state */ unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index f212fa98283..7464ba3b433 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -108,7 +108,7 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) extern void get_online_cpus(void); extern void put_online_cpus(void); #define hotcpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb = \ + static struct notifier_block fn##_nb __cpuinitdata = \ { .notifier_call = fn, .priority = pri }; \ register_cpu_notifier(&fn##_nb); \ } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ddd8652fc3f..e7e91dbfde0 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -83,7 +83,8 @@ struct cpufreq_real_policy { }; struct cpufreq_policy { - cpumask_t cpus; /* affected CPUs */ + cpumask_t cpus; /* CPUs requiring sw coordination */ + cpumask_t related_cpus; /* CPUs with any coordination */ unsigned int shared_type; /* ANY or ALL affected CPUs should set 
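With read_uint()/write_uint() renamed to read_u64()/write_u64(), a controller exposing a single integer control file wires it up roughly as below; the example_ names, the global stand-in for per-cgroup state and the .name field are assumptions for illustration:

#include <linux/cgroup.h>

static u64 example_weight;	/* stand-in for real per-cgroup state */

static u64 example_read_weight(struct cgroup *cgrp, struct cftype *cft)
{
	return example_weight;
}

static int example_write_weight(struct cgroup *cgrp, struct cftype *cft,
				u64 val)
{
	example_weight = val;
	return 0;
}

static struct cftype example_files[] = {
	{
		.name		= "weight",
		.read_u64	= example_read_weight,
		.write_u64	= example_write_weight,
	},
};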
cpufreq */ unsigned int cpu; /* cpu nr of registered CPU */ @@ -307,6 +308,9 @@ extern struct cpufreq_governor cpufreq_gov_performance; #endif #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance) +#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) +extern struct cpufreq_governor cpufreq_gov_powersave; +#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) extern struct cpufreq_governor cpufreq_gov_userspace; #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 259c8051155..9650806fe2e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -14,6 +14,8 @@ * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c. + * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c. + * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c. * * The available cpumask operations are: * @@ -53,7 +55,9 @@ * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing * int cpulist_parse(buf, map) Parse ascii string as cpulist * int cpu_remap(oldbit, old, new) newbit = map(old, new)(oldbit) - * int cpus_remap(dst, src, old, new) *dst = map(old, new)(src) + * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src) + * void cpus_onto(dst, orig, relmap) *dst = orig relative to relmap + * void cpus_fold(dst, orig, sz) dst bits = orig bits mod sz * * for_each_cpu_mask(cpu, mask) for-loop cpu over mask * @@ -330,6 +334,22 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp, bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } +#define cpus_onto(dst, orig, relmap) \ + __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS) +static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp, + const cpumask_t *relmapp, int nbits) +{ + bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); +} + +#define cpus_fold(dst, orig, sz) \ + __cpus_fold(&(dst), &(orig), sz, NR_CPUS) +static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, + int sz, int nbits) +{ + bitmap_fold(dstp->bits, origp->bits, sz, nbits); +} + #if NR_CPUS > 1 #define for_each_cpu_mask(cpu, mask) \ for ((cpu) = first_cpu(mask); \ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 726761e2400..038578362b4 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); void cpuset_update_task_memory_state(void); -int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); +int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); @@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) static inline void cpuset_init_current_mems_allowed(void) {} static inline void cpuset_update_task_memory_state(void) {} -static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) +static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return 1; } diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h new file mode 
100644 index 00000000000..0b0d9c39ed6 --- /dev/null +++ b/include/linux/device_cgroup.h @@ -0,0 +1,12 @@ +#include <linux/module.h> +#include <linux/fs.h> + +#ifdef CONFIG_CGROUP_DEVICE +extern int devcgroup_inode_permission(struct inode *inode, int mask); +extern int devcgroup_inode_mknod(int mode, dev_t dev); +#else +static inline int devcgroup_inode_permission(struct inode *inode, int mask) +{ return 0; } +static inline int devcgroup_inode_mknod(int mode, dev_t dev) +{ return 0; } +#endif diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h new file mode 100644 index 00000000000..1677e2bfa00 --- /dev/null +++ b/include/linux/dma-attrs.h @@ -0,0 +1,74 @@ +#ifndef _DMA_ATTR_H +#define _DMA_ATTR_H + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> + +/** + * an enum dma_attr represents an attribute associated with a DMA + * mapping. The semantics of each attribute should be defined in + * Documentation/DMA-attributes.txt. + */ +enum dma_attr { + DMA_ATTR_WRITE_BARRIER, + DMA_ATTR_MAX, +}; + +#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) + +/** + * struct dma_attrs - an opaque container for DMA attributes + * @flags - bitmask representing a collection of enum dma_attr + */ +struct dma_attrs { + unsigned long flags[__DMA_ATTRS_LONGS]; +}; + +#define DEFINE_DMA_ATTRS(x) \ + struct dma_attrs x = { \ + .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \ + } + +static inline void init_dma_attrs(struct dma_attrs *attrs) +{ + bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); +} + +#ifdef CONFIG_HAVE_DMA_ATTRS +/** + * dma_set_attr - set a specific attribute + * @attr: attribute to set + * @attrs: struct dma_attrs (may be NULL) + */ +static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + if (attrs == NULL) + return; + BUG_ON(attr >= DMA_ATTR_MAX); + __set_bit(attr, attrs->flags); +} + +/** + * dma_get_attr - check for a specific attribute + * @attr: attribute to set + * @attrs: struct dma_attrs (may be NULL) + */ +static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + if (attrs == NULL) + return 0; + BUG_ON(attr >= DMA_ATTR_MAX); + return test_bit(attr, attrs->flags); +} +#else /* !CONFIG_HAVE_DMA_ATTRS */ +static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ +} + +static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) +{ + return 0; +} +#endif /* CONFIG_HAVE_DMA_ATTRS */ +#endif /* _DMA_ATTR_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 33203070962..952e0f857ac 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -146,4 +146,21 @@ static inline void dmam_release_declared_memory(struct device *dev) } #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ +#ifndef CONFIG_HAVE_DMA_ATTRS +struct dma_attrs; + +#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ + dma_map_single(dev, cpu_addr, size, dir) + +#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_single(dev, dma_addr, size, dir) + +#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \ + dma_map_sg(dev, sgl, nents, dir) + +#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ + dma_unmap_sg(dev, sgl, nents, dir) + +#endif /* CONFIG_HAVE_DMA_ATTRS */ + #endif diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 325acdf5c46..2a063b64133 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -90,6 +90,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; 
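The new dma_attrs container and the *_attrs mapping wrappers are used together; on architectures without CONFIG_HAVE_DMA_ATTRS they collapse back to the plain mapping calls. A sketch of mapping a buffer with DMA_ATTR_WRITE_BARRIER requested (device, buffer and size are placeholders):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static dma_addr_t example_map(struct device *dev, void *buf, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	/* A no-op unless the architecture implements the attribute. */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	return dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);
}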
static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } +static inline void dmi_scan_machine(void) { return; } static inline int dmi_get_year(int year) { return 0; } static inline int dmi_name_in_vendors(const char *s) { return 0; } #define dmi_available 0 diff --git a/include/linux/edac.h b/include/linux/edac.h index eab451e69a9..7cf92e8a419 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -3,7 +3,7 @@ * * Author: Dave Jiang <djiang@mvista.com> * - * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under + * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. @@ -26,4 +26,16 @@ extern atomic_t edac_handlers; extern int edac_handler_set(void); extern void edac_atomic_assert_error(void); +static inline void opstate_init(void) +{ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + } + return; +} + #endif diff --git a/include/linux/elf.h b/include/linux/elf.h index bad1b16ec49..ff9fbed9012 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -208,7 +208,7 @@ typedef struct elf32_hdr{ } Elf32_Ehdr; typedef struct elf64_hdr { - unsigned char e_ident[16]; /* ELF "magic number" */ + unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */ Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; diff --git a/include/linux/fb.h b/include/linux/fb.h index 58c57a33e5d..72295b09922 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -791,6 +791,17 @@ struct fb_tile_ops { */ #define FBINFO_MISC_ALWAYS_SETPAR 0x40000 +/* + * Host and GPU endianness differ. + */ +#define FBINFO_FOREIGN_ENDIAN 0x100000 +/* + * Big endian math. This is the same flags as above, but with different + * meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag + * and host endianness. Drivers should not use this flag. + */ +#define FBINFO_BE_MATH 0x100000 + struct fb_info { int node; int flags; @@ -899,15 +910,11 @@ struct fb_info { #endif -#if defined (__BIG_ENDIAN) -#define FB_LEFT_POS(bpp) (32 - bpp) -#define FB_SHIFT_HIGH(val, bits) ((val) >> (bits)) -#define FB_SHIFT_LOW(val, bits) ((val) << (bits)) -#else -#define FB_LEFT_POS(bpp) (0) -#define FB_SHIFT_HIGH(val, bits) ((val) << (bits)) -#define FB_SHIFT_LOW(val, bits) ((val) >> (bits)) -#endif +#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) +#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ + (val) << (bits)) +#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? 
(val) << (bits) : \ + (val) >> (bits)) /* * `Generic' versions of the frame buffer device operations @@ -970,6 +977,25 @@ extern void fb_deferred_io_cleanup(struct fb_info *info); extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync); +static inline bool fb_be_math(struct fb_info *info) +{ +#ifdef CONFIG_FB_FOREIGN_ENDIAN +#if defined(CONFIG_FB_BOTH_ENDIAN) + return info->flags & FBINFO_BE_MATH; +#elif defined(CONFIG_FB_BIG_ENDIAN) + return true; +#elif defined(CONFIG_FB_LITTLE_ENDIAN) + return false; +#endif /* CONFIG_FB_BOTH_ENDIAN */ +#else +#ifdef __BIG_ENDIAN + return true; +#else + return false; +#endif /* __BIG_ENDIAN */ +#endif /* CONFIG_FB_FOREIGN_ENDIAN */ +} + /* drivers/video/fbsysfs.c */ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); extern void framebuffer_release(struct fb_info *info); diff --git a/include/linux/fs.h b/include/linux/fs.h index d6d7c52055c..a1ba005d08e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -474,8 +474,8 @@ struct address_space_operations { int (*releasepage) (struct page *, gfp_t); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, loff_t offset, unsigned long nr_segs); - struct page* (*get_xip_page)(struct address_space *, sector_t, - int); + int (*get_xip_mem)(struct address_space *, pgoff_t, int, + void **, unsigned long *); /* migrate the contents of a page to the specified target */ int (*migratepage) (struct address_space *, struct page *, struct page *); @@ -1178,7 +1178,8 @@ struct block_device_operations { int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long); long (*unlocked_ioctl) (struct file *, unsigned, unsigned long); long (*compat_ioctl) (struct file *, unsigned, unsigned long); - int (*direct_access) (struct block_device *, sector_t, unsigned long *); + int (*direct_access) (struct block_device *, sector_t, + void **, unsigned long *); int (*media_changed) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); @@ -1520,7 +1521,6 @@ extern int get_sb_pseudo(struct file_system_type *, char *, const struct super_operations *ops, unsigned long, struct vfsmount *mnt); extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); -int __put_super(struct super_block *sb); int __put_super_and_need_restart(struct super_block *sb); void unnamed_dev_init(void); @@ -1964,7 +1964,6 @@ extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); -extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 164be9da3c1..b414be38718 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -40,9 +40,9 @@ struct vm_area_struct; #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */ -#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */ -#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. 
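FB_LEFT_POS()/FB_SHIFT_HIGH()/FB_SHIFT_LOW() now take the fb_info pointer so the shift direction can follow fb_be_math() at run time rather than the host's __BIG_ENDIAN. A sketch of a driver-side helper using the new form (the name and the bare shift are illustrative):

#include <linux/fb.h>

/* Place a color value at bit offset 'ofs' within a 32-bit word, honouring
 * the framebuffer's (possibly foreign) endianness. */
static u32 example_place_pixel(struct fb_info *p, u32 color, u32 ofs)
{
	return FB_SHIFT_HIGH(p, color, ofs);
}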
Might fail */ +#define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */ +#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */ +#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */ #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ @@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) static inline enum zone_type gfp_zone(gfp_t flags) { - int base = 0; - -#ifdef CONFIG_NUMA - if (flags & __GFP_THISNODE) - base = MAX_NR_ZONES; -#endif - #ifdef CONFIG_ZONE_DMA if (flags & __GFP_DMA) - return base + ZONE_DMA; + return ZONE_DMA; #endif #ifdef CONFIG_ZONE_DMA32 if (flags & __GFP_DMA32) - return base + ZONE_DMA32; + return ZONE_DMA32; #endif if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == (__GFP_HIGHMEM | __GFP_MOVABLE)) - return base + ZONE_MOVABLE; + return ZONE_MOVABLE; #ifdef CONFIG_HIGHMEM if (flags & __GFP_HIGHMEM) - return base + ZONE_HIGHMEM; + return ZONE_HIGHMEM; #endif - return base + ZONE_NORMAL; -} - -static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags) -{ - BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); - return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags; + return ZONE_NORMAL; } /* @@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags) * virtual kernel addresses to the allocated page(s). */ +static inline int gfp_zonelist(gfp_t flags) +{ + if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE)) + return 1; + + return 0; +} + /* * We get the zone list from the current node and the gfp_mask. * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones. + * There are two zonelists per node, one for all zones with memory and + * one containing just zones from the node the zonelist belongs to. * * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets * optimized to &contig_page_data at compile-time. 
*/ +static inline struct zonelist *node_zonelist(int nid, gfp_t flags) +{ + return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); +} #ifndef HAVE_ARCH_FREE_PAGE static inline void arch_free_page(struct page *page, int order) { } @@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { } extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *); +extern struct page * +__alloc_pages_nodemask(gfp_t, unsigned int, + struct zonelist *, nodemask_t *nodemask); + static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { @@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, if (nid < 0) nid = numa_node_id(); - return __alloc_pages(gfp_mask, order, - NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask)); + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } #ifdef CONFIG_NUMA diff --git a/include/linux/hid.h b/include/linux/hid.h index d951ec41124..4ce3b7a979b 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -498,13 +498,13 @@ struct hid_parser { struct hid_class_descriptor { __u8 bDescriptorType; - __u16 wDescriptorLength; + __le16 wDescriptorLength; } __attribute__ ((packed)); struct hid_descriptor { __u8 bLength; __u8 bDescriptorType; - __u16 bcdHID; + __le16 bcdHID; __u8 bCountryCode; __u8 bNumDescriptors; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index addca4cd4f1..a79e80b689d 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -8,6 +8,7 @@ #include <linux/mempolicy.h> #include <linux/shm.h> #include <asm/tlbflush.h> +#include <asm/hugetlb.h> struct ctl_table; @@ -51,51 +52,6 @@ int pmd_huge(pmd_t pmd); void hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); -#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE -#define is_hugepage_only_range(mm, addr, len) 0 -#endif - -#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE -#define hugetlb_free_pgd_range free_pgd_range -#else -void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr, - unsigned long end, unsigned long floor, - unsigned long ceiling); -#endif - -#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE -/* - * If the arch doesn't supply something else, assume that hugepage - * size aligned regions are ok without further preparation. 
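node_zonelist() hides the new two-zonelists-per-node layout: callers pick the list from the nid and gfp mask and hand it to __alloc_pages(), exactly as the reworked alloc_pages_node() does. A minimal sketch (the example_ wrapper is illustrative):

#include <linux/gfp.h>
#include <linux/mmzone.h>

static struct page *example_alloc_on_node(int nid, gfp_t gfp_mask,
					  unsigned int order)
{
	struct zonelist *zl = node_zonelist(nid, gfp_mask);

	return __alloc_pages(gfp_mask, order, zl);
}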
- */ -static inline int prepare_hugepage_range(unsigned long addr, unsigned long len) -{ - if (len & ~HPAGE_MASK) - return -EINVAL; - if (addr & ~HPAGE_MASK) - return -EINVAL; - return 0; -} -#else -int prepare_hugepage_range(unsigned long addr, unsigned long len); -#endif - -#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE -#define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte) -#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep) -#else -void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte); -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); -#endif - -#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK -#define hugetlb_prefault_arch_hook(mm) do { } while (0) -#else -void hugetlb_prefault_arch_hook(struct mm_struct *mm); -#endif - #else /* !CONFIG_HUGETLB_PAGE */ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) diff --git a/include/linux/i2o.h b/include/linux/i2o.h index e92170dda24..f65e58a1d92 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -613,14 +613,9 @@ struct i2o_sys_tbl { extern struct list_head i2o_controllers; /* Message functions */ -static inline struct i2o_message *i2o_msg_get(struct i2o_controller *); extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int); -static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *); -static inline int i2o_msg_post_wait(struct i2o_controller *, - struct i2o_message *, unsigned long); extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *, unsigned long, struct i2o_dma *); -static inline void i2o_flush_reply(struct i2o_controller *, u32); /* IOP functions */ extern int i2o_status_get(struct i2o_controller *); diff --git a/include/linux/ide.h b/include/linux/ide.h index f0af504dfa4..b0135b0c3a0 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */ #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ /* - * Tune flags - */ -#define IDE_TUNE_NOAUTO 2 -#define IDE_TUNE_AUTO 1 -#define IDE_TUNE_DEFAULT 0 - -/* * state flags */ @@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */ */ #define IDE_NR_PORTS (10) -#define IDE_DATA_OFFSET (0) -#define IDE_ERROR_OFFSET (1) -#define IDE_NSECTOR_OFFSET (2) -#define IDE_SECTOR_OFFSET (3) -#define IDE_LCYL_OFFSET (4) -#define IDE_HCYL_OFFSET (5) -#define IDE_SELECT_OFFSET (6) -#define IDE_STATUS_OFFSET (7) -#define IDE_CONTROL_OFFSET (8) -#define IDE_IRQ_OFFSET (9) - -#define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET -#define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET -#define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET -#define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET -#define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET -#define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET +struct ide_io_ports { + unsigned long data_addr; + + union { + unsigned long error_addr; /* read: error */ + unsigned long feature_addr; /* write: feature */ + }; + + unsigned long nsect_addr; + unsigned long lbal_addr; + unsigned long lbam_addr; + unsigned long lbah_addr; + + unsigned long device_addr; + + union { + unsigned long status_addr; /*  read: status  */ + unsigned long command_addr; /* write: command */ + }; + + unsigned long ctl_addr; + + unsigned long irq_addr; +}; #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) #define BAD_R_STAT (BUSY_STAT | ERR_STAT) @@ -163,7 +163,11 @@ typedef u8 hwif_chipset_t; * Structure to hold all information about the location of this 
port */ typedef struct hw_regs_s { - unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */ + union { + struct ide_io_ports io_ports; + unsigned long io_ports_array[IDE_NR_PORTS]; + }; + int irq; /* our irq number */ ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ hwif_chipset_t chipset; @@ -179,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw, { unsigned int i; - for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) - hw->io_ports[i] = io_addr++; + for (i = 0; i <= 7; i++) + hw->io_ports_array[i] = io_addr++; - hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr; + hw->io_ports.ctl_addr = ctl_addr; } #include <asm/ide.h> @@ -328,7 +332,6 @@ typedef struct ide_drive_s { unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ unsigned nodma : 1; /* disallow DMA */ - unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */ unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ @@ -424,6 +427,8 @@ struct ide_dma_ops { void (*dma_timeout)(struct ide_drive_s *); }; +struct ide_task_s; + typedef struct hwif_s { struct hwif_s *next; /* for linked-list in ide_hwgroup_t */ struct hwif_s *mate; /* other hwif from same PCI chip */ @@ -432,8 +437,8 @@ typedef struct hwif_s { char name[6]; /* name of interface, eg. "ide0" */ - /* task file registers for pata and sata */ - unsigned long io_ports[IDE_NR_PORTS]; + struct ide_io_ports io_ports; + unsigned long sata_scr[SATA_NR_PORTS]; ide_drive_t drives[MAX_DRIVES]; /* drive info */ @@ -464,24 +469,18 @@ typedef struct hwif_s { const struct ide_port_ops *port_ops; const struct ide_dma_ops *dma_ops; - void (*ata_input_data)(ide_drive_t *, void *, u32); - void (*ata_output_data)(ide_drive_t *, void *, u32); + void (*tf_load)(ide_drive_t *, struct ide_task_s *); + void (*tf_read)(ide_drive_t *, struct ide_task_s *); - void (*atapi_input_bytes)(ide_drive_t *, void *, u32); - void (*atapi_output_bytes)(ide_drive_t *, void *, u32); + void (*input_data)(ide_drive_t *, struct request *, void *, unsigned); + void (*output_data)(ide_drive_t *, struct request *, void *, unsigned); void (*ide_dma_clear_irq)(ide_drive_t *drive); void (*OUTB)(u8 addr, unsigned long port); void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port); - void (*OUTW)(u16 addr, unsigned long port); - void (*OUTSW)(unsigned long port, void *addr, u32 count); - void (*OUTSL)(unsigned long port, void *addr, u32 count); u8 (*INB)(unsigned long port); - u16 (*INW)(unsigned long port); - void (*INSW)(unsigned long port, void *addr, u32 count); - void (*INSL)(unsigned long port, void *addr, u32 count); /* dma physical region descriptor table (cpu view) */ unsigned int *dmatable_cpu; @@ -506,10 +505,7 @@ typedef struct hwif_s { unsigned long dma_base; /* base addr for dma ports */ unsigned long dma_command; /* dma command register */ - unsigned long dma_vendor1; /* dma vendor 1 register */ unsigned long dma_status; /* dma status register */ - unsigned long dma_vendor3; /* dma vendor 3 register */ - unsigned long dma_prdtable; /* actual prd table address */ unsigned long config_data; /* for use by chipset-specific code */ unsigned long select_data; /* for use by chipset-specific code */ @@ -520,7 +516,6 @@ typedef struct hwif_s { unsigned present : 1; /* this interface exists */ unsigned serialized : 1; /* 
serialized all channel operation */ unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ - unsigned reset : 1; /* reset after probe */ unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ unsigned mmio : 1; /* host uses MMIO */ @@ -545,7 +540,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); typedef int (ide_expiry_t)(ide_drive_t *); /* used by ide-cd, ide-floppy, etc. */ -typedef void (xfer_func_t)(ide_drive_t *, void *, u32); +typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned); typedef struct hwgroup_s { /* irq handler, if active */ @@ -703,10 +698,6 @@ void ide_add_generic_settings(ide_drive_t *); read_proc_t proc_ide_read_capacity; read_proc_t proc_ide_read_geometry; -#ifdef CONFIG_BLK_DEV_IDEPCI -void ide_pci_create_host_proc(const char *, get_info_t *); -#endif - /* * Standard exit stuff: */ @@ -807,8 +798,14 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig #ifndef _IDE_C extern ide_hwif_t ide_hwifs[]; /* master data repository */ #endif +extern int ide_noacpi; +extern int ide_acpigtf; +extern int ide_acpionboot; extern int noautodma; +extern int ide_vlb_clk; +extern int ide_pci_clk; + ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); static inline ide_hwif_t *ide_find_port(void) @@ -825,6 +822,10 @@ extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigne void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, ide_expiry_t *); +void ide_execute_pkt_cmd(ide_drive_t *); + +void ide_pad_transfer(ide_drive_t *, int, int); + ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat); @@ -961,8 +962,7 @@ typedef struct ide_task_s { void *special; /* valid_t generally */ } ide_task_t; -void ide_tf_load(ide_drive_t *, ide_task_t *); -void ide_tf_read(ide_drive_t *, ide_task_t *); +void ide_tf_dump(const char *, struct ide_taskfile *); extern void SELECT_DRIVE(ide_drive_t *); extern void SELECT_MASK(ide_drive_t *, int); @@ -1068,8 +1068,8 @@ enum { IDE_HFLAG_NO_DMA = (1 << 14), /* check if host is PCI IDE device before allowing DMA */ IDE_HFLAG_NO_AUTODMA = (1 << 15), - /* don't autotune PIO */ - IDE_HFLAG_NO_AUTOTUNE = (1 << 16), + /* host uses MMIO */ + IDE_HFLAG_MMIO = (1 << 16), /* host is CS5510/CS5520 */ IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA, /* no LBA48 */ @@ -1215,13 +1215,15 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} #endif void ide_remove_port_from_hwgroup(ide_hwif_t *); -void ide_unregister(unsigned int); +void ide_unregister(ide_hwif_t *); void ide_register_region(struct gendisk *); void ide_unregister_region(struct gendisk *); void ide_undecoded_slave(ide_drive_t *); +void ide_port_apply_params(ide_hwif_t *); + int ide_device_add_all(u8 *idx, const struct ide_port_info *); int ide_device_add(u8 idx[4], const struct ide_port_info *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); @@ -1333,51 +1335,27 @@ static inline void ide_set_irq(ide_drive_t *drive, int on) { ide_hwif_t *hwif = drive->hwif; - hwif->OUTB(drive->ctl | (on ? 0 : 2), - hwif->io_ports[IDE_CONTROL_OFFSET]); + hwif->OUTB(drive->ctl | (on ? 
0 : 2), hwif->io_ports.ctl_addr); } static inline u8 ide_read_status(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; - return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); + return hwif->INB(hwif->io_ports.status_addr); } static inline u8 ide_read_altstatus(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; - return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]); + return hwif->INB(hwif->io_ports.ctl_addr); } static inline u8 ide_read_error(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; - return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); + return hwif->INB(hwif->io_ports.error_addr); } - -/* - * Too bad. The drive wants to send us data which we are not ready to accept. - * Just throw it away. - */ -static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount) -{ - ide_hwif_t *hwif = drive->hwif; - - /* FIXME: use ->atapi_input_bytes */ - while (bcount--) - (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]); -} - -static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) -{ - ide_hwif_t *hwif = drive->hwif; - - /* FIXME: use ->atapi_output_bytes */ - while (bcount--) - hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]); -} - #endif /* _IDE_H */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 0edda411959..9a2d762124d 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -14,6 +14,7 @@ #include <linux/types.h> #include <linux/bitops.h> +#include <linux/init.h> #if BITS_PER_LONG == 32 # define IDR_BITS 5 @@ -115,4 +116,6 @@ void ida_remove(struct ida *ida, int id); void ida_destroy(struct ida *ida); void ida_init(struct ida *ida); +void __init idr_init_cache(void); + #endif /* __IDR_H__ */ diff --git a/include/linux/init.h b/include/linux/init.h index fb58c0493cf..21d658cdfa2 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -147,6 +147,8 @@ extern unsigned int reset_devices; void setup_arch(char **); void prepare_namespace(void); +extern void (*late_time_init)(void); + #endif #ifndef MODULE diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 37a6f5bc4a9..bf6b8a61f8d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -9,6 +9,7 @@ #include <linux/ipc.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> +#include <linux/securebits.h> #include <net/net_namespace.h> #define INIT_FDTABLE \ @@ -172,7 +173,7 @@ extern struct group_info init_groups; .cap_inheritable = CAP_INIT_INH_SET, \ .cap_permitted = CAP_FULL_SET, \ .cap_bset = CAP_INIT_BSET, \ - .keep_capabilities = 0, \ + .securebits = SECUREBITS_DEFAULT, \ .user = INIT_USER, \ .comm = "swapper", \ .thread = INIT_THREAD, \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b5fef13148b..f1fc7470d26 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -289,6 +289,7 @@ struct softirq_action }; asmlinkage void do_softirq(void); +asmlinkage void __do_softirq(void); extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index e4451d1da75..ea6c18a8b0d 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -4,6 +4,17 @@ #include <linux/err.h> #include <linux/idr.h> #include <linux/rwsem.h> +#include <linux/notifier.h> + +/* + * ipc namespace events + */ +#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ 
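With the flat io_ports[] array replaced by struct ide_io_ports, host drivers either keep using ide_std_init_ports() for the classic base+0..7 layout or fill the named fields directly. A sketch of hw_regs_t setup under the new scheme (base/ctl addresses are placeholders):

#include <linux/ide.h>
#include <linux/string.h>

static void example_setup_hw(hw_regs_t *hw, unsigned long base,
			     unsigned long ctl, int irq)
{
	memset(hw, 0, sizeof(*hw));

	/* taskfile registers at base+0..base+7, device control at 'ctl' */
	ide_std_init_ports(hw, base, ctl);
	hw->irq = irq;
}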
+#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ +#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ + +#define IPCNS_CALLBACK_PRI 0 + struct ipc_ids { int in_use; @@ -30,15 +41,24 @@ struct ipc_namespace { size_t shm_ctlall; int shm_ctlmni; int shm_tot; + + struct notifier_block ipcns_nb; }; extern struct ipc_namespace init_ipc_ns; +extern atomic_t nr_ipc_ns; #ifdef CONFIG_SYSVIPC #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, -#else + +extern int register_ipcns_notifier(struct ipc_namespace *); +extern int cond_register_ipcns_notifier(struct ipc_namespace *); +extern int unregister_ipcns_notifier(struct ipc_namespace *); +extern int ipcns_notify(unsigned long); + +#else /* CONFIG_SYSVIPC */ #define INIT_IPC_NS(ns) -#endif +#endif /* CONFIG_SYSVIPC */ #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) extern void free_ipc_ns(struct kref *kref); diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index c5bd28b69ae..7ebdb4fb4e5 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -64,7 +64,7 @@ * applications and another for userland applications. The * capabilities are basically the same for both interface, although * the interfaces are somewhat different. The stuff in the - * #ifdef KERNEL below is the in-kernel interface. The userland + * #ifdef __KERNEL__ below is the in-kernel interface. The userland * interface is defined later in the file. */ @@ -75,8 +75,7 @@ * work for sockets. */ #define IPMI_MAX_ADDR_SIZE 32 -struct ipmi_addr -{ +struct ipmi_addr { /* Try to take these from the "Channel Medium Type" table in section 6.5 of the IPMI 1.5 manual. */ int addr_type; @@ -90,8 +89,7 @@ struct ipmi_addr * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC. */ #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c -struct ipmi_system_interface_addr -{ +struct ipmi_system_interface_addr { int addr_type; short channel; unsigned char lun; @@ -100,10 +98,9 @@ struct ipmi_system_interface_addr /* An IPMB Address. */ #define IPMI_IPMB_ADDR_TYPE 0x01 /* Used for broadcast get device id as described in section 17.9 of the - IPMI 1.5 manual. */ + IPMI 1.5 manual. */ #define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41 -struct ipmi_ipmb_addr -{ +struct ipmi_ipmb_addr { int addr_type; short channel; unsigned char slave_addr; @@ -128,8 +125,7 @@ struct ipmi_ipmb_addr * message is a little weird, but this is required. */ #define IPMI_LAN_ADDR_TYPE 0x04 -struct ipmi_lan_addr -{ +struct ipmi_lan_addr { int addr_type; short channel; unsigned char privilege; @@ -162,16 +158,14 @@ struct ipmi_lan_addr * byte of data in the response (as the spec shows the messages laid * out). */ -struct ipmi_msg -{ +struct ipmi_msg { unsigned char netfn; unsigned char cmd; unsigned short data_len; unsigned char __user *data; }; -struct kernel_ipmi_msg -{ +struct kernel_ipmi_msg { unsigned char netfn; unsigned char cmd; unsigned short data_len; @@ -239,12 +233,11 @@ typedef struct ipmi_user *ipmi_user_t; * used after the message is delivered, so the upper layer may use the * link to build a linked list, if it likes. */ -struct ipmi_recv_msg -{ +struct ipmi_recv_msg { struct list_head link; /* The type of message as defined in the "Receive Types" - defines above. */ + defines above. */ int recv_type; ipmi_user_t user; @@ -271,9 +264,8 @@ struct ipmi_recv_msg /* Allocate and free the receive message. 
*/ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); -struct ipmi_user_hndl -{ - /* Routine type to call when a message needs to be routed to +struct ipmi_user_hndl { + /* Routine type to call when a message needs to be routed to the upper layer. This will be called with some locks held, the only IPMI routines that can be called are ipmi_request and the alloc/free operations. The handler_data is the @@ -368,9 +360,8 @@ int ipmi_request_supply_msgs(ipmi_user_t user, * Poll the IPMI interface for the user. This causes the IPMI code to * do an immediate check for information from the driver and handle * anything that is immediately pending. This will not block in any - * way. This is useful if you need to implement polling from the user - * for things like modifying the watchdog timeout when a panic occurs - * or disabling the watchdog timer on a reboot. + * way. This is useful if you need to spin waiting for something to + * happen in the IPMI driver. */ void ipmi_poll_interface(ipmi_user_t user); @@ -422,12 +413,6 @@ int ipmi_get_maintenance_mode(ipmi_user_t user); int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); /* - * Allow run-to-completion mode to be set for the interface of - * a specific user. - */ -void ipmi_user_set_run_to_completion(ipmi_user_t user, int val); - -/* * When the user is created, it will not receive IPMI events by * default. The user must set this to TRUE to get incoming events. * The first user that sets this to TRUE will receive all events that @@ -440,8 +425,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val); * every existing interface when a new watcher is registered with * ipmi_smi_watcher_register(). */ -struct ipmi_smi_watcher -{ +struct ipmi_smi_watcher { struct list_head link; /* You must set the owner to the current module, if you are in @@ -512,8 +496,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len); /* Messages sent to the interface are this format. */ -struct ipmi_req -{ +struct ipmi_req { unsigned char __user *addr; /* Address to send the message to. */ unsigned int addr_len; @@ -538,12 +521,11 @@ struct ipmi_req /* Messages sent to the interface with timing parameters are this format. */ -struct ipmi_req_settime -{ +struct ipmi_req_settime { struct ipmi_req req; /* See ipmi_request_settime() above for details on these - values. */ + values. */ int retries; unsigned int retry_time_ms; }; @@ -560,8 +542,7 @@ struct ipmi_req_settime struct ipmi_req_settime) /* Messages received from the interface are this format. */ -struct ipmi_recv -{ +struct ipmi_recv { int recv_type; /* Is this a command, response or an asyncronous event. */ @@ -607,13 +588,12 @@ struct ipmi_recv struct ipmi_recv) /* Register to get commands from other entities on this interface. */ -struct ipmi_cmdspec -{ +struct ipmi_cmdspec { unsigned char netfn; unsigned char cmd; }; -/* +/* * Register to receive a specific command. error values: * - EFAULT - an address supplied was invalid. * - EBUSY - The netfn/cmd supplied was already in use. @@ -636,8 +616,7 @@ struct ipmi_cmdspec * else. The chans field is a bitmask, (1 << channel) for each channel. * It may be IPMI_CHAN_ALL for all channels. */ -struct ipmi_cmdspec_chans -{ +struct ipmi_cmdspec_chans { unsigned int netfn; unsigned int cmd; unsigned int chans; @@ -659,7 +638,7 @@ struct ipmi_cmdspec_chans #define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29, \ struct ipmi_cmdspec_chans) -/* +/* * Set whether this interface receives events. 
Note that the first * user registered for events will get all pending events for the * interface. error values: @@ -675,15 +654,18 @@ struct ipmi_cmdspec_chans * things it takes to determine your address (if not the BMC) and set * it for everyone else. You should probably leave the LUN alone. */ -struct ipmi_channel_lun_address_set -{ +struct ipmi_channel_lun_address_set { unsigned short channel; unsigned char value; }; -#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set) -#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set) -#define IPMICTL_SET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set) -#define IPMICTL_GET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set) +#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD \ + _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set) +#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD \ + _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set) +#define IPMICTL_SET_MY_CHANNEL_LUN_CMD \ + _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set) +#define IPMICTL_GET_MY_CHANNEL_LUN_CMD \ + _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set) /* Legacy interfaces, these only set IPMB 0. */ #define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int) #define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) @@ -694,8 +676,7 @@ struct ipmi_channel_lun_address_set * Get/set the default timing values for an interface. You shouldn't * generally mess with these. */ -struct ipmi_timing_parms -{ +struct ipmi_timing_parms { int retries; unsigned int retry_time_ms; }; diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 6e8cec50338..62b73668b60 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -60,8 +60,7 @@ typedef struct ipmi_smi *ipmi_smi_t; * asynchronous data and messages and request them from the * interface. */ -struct ipmi_smi_msg -{ +struct ipmi_smi_msg { struct list_head link; long msgid; @@ -74,12 +73,11 @@ struct ipmi_smi_msg unsigned char rsp[IPMI_MAX_MSG_LENGTH]; /* Will be called when the system is done with the message - (presumably to free it). */ + (presumably to free it). */ void (*done)(struct ipmi_smi_msg *msg); }; -struct ipmi_smi_handlers -{ +struct ipmi_smi_handlers { struct module *owner; /* The low-level interface cannot start sending messages to @@ -231,7 +229,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) directory for this interface. Note that the entry will automatically be dstroyed when the interface is destroyed. 
*/ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, - read_proc_t *read_proc, write_proc_t *write_proc, + read_proc_t *read_proc, void *data, struct module *owner); #endif /* __LINUX_IPMI_SMI_H */ diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h new file mode 100644 index 00000000000..22a72198c14 --- /dev/null +++ b/include/linux/kbuild.h @@ -0,0 +1,15 @@ +#ifndef __LINUX_KBUILD_H +#define __LINUX_KBUILD_H + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +#define OFFSET(sym, str, mem) \ + DEFINE(sym, offsetof(struct str, mem)) + +#define COMMENT(x) \ + asm volatile("\n->#" x) + +#endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cd6d02cf854..53839ba265e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -20,6 +20,9 @@ extern const char linux_banner[]; extern const char linux_proc_banner[]; +#define USHORT_MAX ((u16)(~0U)) +#define SHORT_MAX ((s16)(USHORT_MAX>>1)) +#define SHORT_MIN (-SHORT_MAX - 1) #define INT_MAX ((int)(~0U>>1)) #define INT_MIN (-INT_MAX - 1) #define UINT_MAX (~0U) @@ -188,6 +191,7 @@ extern int log_buf_copy(char *dest, int idx, int len); extern int printk_ratelimit_jiffies; extern int printk_ratelimit_burst; extern int printk_ratelimit(void); +extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst); extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); @@ -255,6 +259,7 @@ extern enum system_states { #define TAINT_USER (1<<6) #define TAINT_DIE (1<<7) #define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8) +#define TAINT_WARN (1<<9) extern void dump_stack(void) __cold; diff --git a/include/linux/key.h b/include/linux/key.h index a70b8a8f200..c45c962d1cc 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -19,6 +19,7 @@ #include <linux/list.h> #include <linux/rbtree.h> #include <linux/rcupdate.h> +#include <linux/sysctl.h> #include <asm/atomic.h> #ifdef __KERNEL__ @@ -67,6 +68,8 @@ struct key; #define KEY_OTH_SETATTR 0x00000020 #define KEY_OTH_ALL 0x0000003f +#define KEY_PERM_UNDEF 0xffffffff + struct seq_file; struct user_struct; struct signal_struct; @@ -208,16 +211,19 @@ extern struct key *request_key(struct key_type *type, extern struct key *request_key_with_auxdata(struct key_type *type, const char *description, - const char *callout_info, + const void *callout_info, + size_t callout_len, void *aux); extern struct key *request_key_async(struct key_type *type, const char *description, - const char *callout_info); + const void *callout_info, + size_t callout_len); extern struct key *request_key_async_with_auxdata(struct key_type *type, const char *description, - const char *callout_info, + const void *callout_info, + size_t callout_len, void *aux); extern int wait_for_key_construction(struct key *key, bool intr); @@ -229,6 +235,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring, const char *description, const void *payload, size_t plen, + key_perm_t perm, unsigned long flags); extern int key_update(key_ref_t key, @@ -257,14 +264,18 @@ extern int keyring_add_key(struct key *keyring, extern struct key *key_lookup(key_serial_t id); -#define key_serial(key) ((key) ? (key)->serial : 0) +static inline key_serial_t key_serial(struct key *key) +{ + return key ? 
key->serial : 0; +} + +#ifdef CONFIG_SYSCTL +extern ctl_table key_sysctls[]; +#endif /* * the userspace interface */ -extern struct key root_user_keyring, root_session_keyring; -extern int alloc_uid_keyring(struct user_struct *user, - struct task_struct *ctx); extern void switch_uid_keyring(struct user_struct *new_user); extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk); extern int copy_thread_group_keys(struct task_struct *tsk); @@ -293,7 +304,6 @@ extern void key_init(void); #define make_key_ref(k, p) ({ NULL; }) #define key_ref_to_ptr(k) ({ NULL; }) #define is_key_possessed(k) 0 -#define alloc_uid_keyring(u,c) 0 #define switch_uid_keyring(u) do { } while(0) #define __install_session_keyring(t, k) ({ NULL; }) #define copy_keys(f,t) 0 @@ -306,10 +316,6 @@ extern void key_init(void); #define key_fsgid_changed(t) do { } while(0) #define key_init() do { } while(0) -/* Initial keyrings */ -extern struct key root_user_keyring; -extern struct key root_session_keyring; - #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ #endif /* _LINUX_KEY_H */ diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h index 3365945640c..656ee6b77a4 100644 --- a/include/linux/keyctl.h +++ b/include/linux/keyctl.h @@ -49,5 +49,6 @@ #define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */ #define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ #define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ +#define KEYCTL_GET_SECURITY 17 /* get key security label */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 0f28486f636..1036631ff4f 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -173,6 +173,13 @@ struct kretprobe_blackpoint { const char *name; void *addr; }; + +struct kprobe_blackpoint { + const char *name; + unsigned long start_addr; + unsigned long range; +}; + extern struct kretprobe_blackpoint kretprobe_blacklist[]; static inline void kretprobe_assert(struct kretprobe_instance *ri, @@ -227,15 +234,21 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); +int register_kprobes(struct kprobe **kps, int num); +void unregister_kprobes(struct kprobe **kps, int num); int setjmp_pre_handler(struct kprobe *, struct pt_regs *); int longjmp_break_handler(struct kprobe *, struct pt_regs *); int register_jprobe(struct jprobe *p); void unregister_jprobe(struct jprobe *p); +int register_jprobes(struct jprobe **jps, int num); +void unregister_jprobes(struct jprobe **jps, int num); void jprobe_return(void); unsigned long arch_deref_entry_point(void *); int register_kretprobe(struct kretprobe *rp); void unregister_kretprobe(struct kretprobe *rp); +int register_kretprobes(struct kretprobe **rps, int num); +void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); @@ -254,16 +267,30 @@ static inline int register_kprobe(struct kprobe *p) { return -ENOSYS; } +static inline int register_kprobes(struct kprobe **kps, int num) +{ + return -ENOSYS; +} static inline void unregister_kprobe(struct kprobe *p) { } +static inline void unregister_kprobes(struct kprobe **kps, int num) +{ +} static inline int register_jprobe(struct jprobe *p) { return -ENOSYS; } +static inline int register_jprobes(struct jprobe **jps, int num) +{ + return -ENOSYS; +} static inline void unregister_jprobe(struct jprobe *p) 
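The kprobes.h hunk above adds batch variants (register_kprobes(), register_jprobes(), register_kretprobes() and their unregister counterparts) that take an array of probe pointers; the rest of the !CONFIG_KPROBES stubs continue right after this sketch. A minimal, hypothetical module using the batch calls might look like the code below; the probed symbol names are only illustrative and the handlers are omitted for brevity.

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/kprobes.h>

	static struct kprobe probe_a = { .symbol_name = "do_fork" };
	static struct kprobe probe_b = { .symbol_name = "do_exit" };
	static struct kprobe *my_probes[] = { &probe_a, &probe_b };

	static int __init batch_probe_init(void)
	{
		/* Arm both probes with a single call instead of two. */
		return register_kprobes(my_probes, ARRAY_SIZE(my_probes));
	}

	static void __exit batch_probe_exit(void)
	{
		unregister_kprobes(my_probes, ARRAY_SIZE(my_probes));
	}

	module_init(batch_probe_init);
	module_exit(batch_probe_exit);
	MODULE_LICENSE("GPL");

The arrays are plain arrays of kprobe pointers, so ARRAY_SIZE() supplies the count expected by the num argument of the batch calls.
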
{ } +static inline void unregister_jprobes(struct jprobe **jps, int num) +{ +} static inline void jprobe_return(void) { } @@ -271,9 +298,16 @@ static inline int register_kretprobe(struct kretprobe *rp) { return -ENOSYS; } +static inline int register_kretprobes(struct kretprobe **rps, int num) +{ + return -ENOSYS; +} static inline void unregister_kretprobe(struct kretprobe *rp) { } +static inline void unregister_kretprobes(struct kretprobe **rps, int num) +{ +} static inline void kprobe_flush_task(struct task_struct *tk) { } diff --git a/include/linux/kvm.h b/include/linux/kvm.h index c1ec04fd000..a281afeddfb 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -8,11 +8,18 @@ */ #include <asm/types.h> +#include <linux/compiler.h> #include <linux/ioctl.h> #include <asm/kvm.h> #define KVM_API_VERSION 12 +/* for KVM_TRACE_ENABLE */ +struct kvm_user_trace_setup { + __u32 buf_size; /* sub_buffer size of each per-cpu */ + __u32 buf_nr; /* the number of sub_buffers of each per-cpu */ +}; + /* for KVM_CREATE_MEMORY_REGION */ struct kvm_memory_region { __u32 slot; @@ -73,6 +80,9 @@ struct kvm_irqchip { #define KVM_EXIT_INTR 10 #define KVM_EXIT_SET_TPR 11 #define KVM_EXIT_TPR_ACCESS 12 +#define KVM_EXIT_S390_SIEIC 13 +#define KVM_EXIT_S390_RESET 14 +#define KVM_EXIT_DCR 15 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -137,6 +147,27 @@ struct kvm_run { __u32 is_write; __u32 pad; } tpr_access; + /* KVM_EXIT_S390_SIEIC */ + struct { + __u8 icptcode; + __u64 mask; /* psw upper half */ + __u64 addr; /* psw lower half */ + __u16 ipa; + __u32 ipb; + } s390_sieic; + /* KVM_EXIT_S390_RESET */ +#define KVM_S390_RESET_POR 1 +#define KVM_S390_RESET_CLEAR 2 +#define KVM_S390_RESET_SUBSYSTEM 4 +#define KVM_S390_RESET_CPU_INIT 8 +#define KVM_S390_RESET_IPL 16 + __u64 s390_reset_flags; + /* KVM_EXIT_DCR */ + struct { + __u32 dcrn; + __u32 data; + __u8 is_write; + } dcr; /* Fix the size of the union. */ char padding[256]; }; @@ -204,6 +235,74 @@ struct kvm_vapic_addr { __u64 vapic_addr; }; +/* for KVM_SET_MPSTATE */ + +#define KVM_MP_STATE_RUNNABLE 0 +#define KVM_MP_STATE_UNINITIALIZED 1 +#define KVM_MP_STATE_INIT_RECEIVED 2 +#define KVM_MP_STATE_HALTED 3 +#define KVM_MP_STATE_SIPI_RECEIVED 4 + +struct kvm_mp_state { + __u32 mp_state; +}; + +struct kvm_s390_psw { + __u64 mask; + __u64 addr; +}; + +/* valid values for type in kvm_s390_interrupt */ +#define KVM_S390_SIGP_STOP 0xfffe0000u +#define KVM_S390_PROGRAM_INT 0xfffe0001u +#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u +#define KVM_S390_RESTART 0xfffe0003u +#define KVM_S390_INT_VIRTIO 0xffff2603u +#define KVM_S390_INT_SERVICE 0xffff2401u +#define KVM_S390_INT_EMERGENCY 0xffff1201u + +struct kvm_s390_interrupt { + __u32 type; + __u32 parm; + __u64 parm64; +}; + +#define KVM_TRC_SHIFT 16 +/* + * kvm trace categories + */ +#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) +#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */ + +/* + * kvm trace action + */ +#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) +#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) +#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) + +#define KVM_TRC_HEAD_SIZE 12 +#define KVM_TRC_CYCLE_SIZE 8 +#define KVM_TRC_EXTRA_MAX 7 + +/* This structure represents a single trace buffer record. 
*/ +struct kvm_trace_rec { + __u32 event:28; + __u32 extra_u32:3; + __u32 cycle_in:1; + __u32 pid; + __u32 vcpu_id; + union { + struct { + __u32 cycle_lo, cycle_hi; + __u32 extra_u32[KVM_TRC_EXTRA_MAX]; + } cycle; + struct { + __u32 extra_u32[KVM_TRC_EXTRA_MAX]; + } nocycle; + } u; +}; + #define KVMIO 0xAE /* @@ -212,6 +311,8 @@ struct kvm_vapic_addr { #define KVM_GET_API_VERSION _IO(KVMIO, 0x00) #define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */ #define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list) + +#define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06) /* * Check if a kvm extension is available. Argument is extension number, * return is 1 (yes) or 0 (no, sorry). @@ -222,7 +323,12 @@ struct kvm_vapic_addr { */ #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) - +/* + * ioctls for kvm trace + */ +#define KVM_TRACE_ENABLE _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) +#define KVM_TRACE_PAUSE _IO(KVMIO, 0x07) +#define KVM_TRACE_DISABLE _IO(KVMIO, 0x08) /* * Extension capability list. */ @@ -233,6 +339,13 @@ struct kvm_vapic_addr { #define KVM_CAP_SET_TSS_ADDR 4 #define KVM_CAP_VAPIC 6 #define KVM_CAP_EXT_CPUID 7 +#define KVM_CAP_CLOCKSOURCE 8 +#define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */ +#define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */ +#define KVM_CAP_PIT 11 +#define KVM_CAP_NOP_IO_DELAY 12 +#define KVM_CAP_PV_MMU 13 +#define KVM_CAP_MP_STATE 14 /* * ioctls for VM fds @@ -255,6 +368,9 @@ struct kvm_vapic_addr { #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) #define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip) #define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip) +#define KVM_CREATE_PIT _IO(KVMIO, 0x64) +#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) +#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) /* * ioctls for vcpu fds @@ -281,5 +397,17 @@ struct kvm_vapic_addr { #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) /* Available with KVM_CAP_VAPIC */ #define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) +/* valid for virtual machine (for floating interrupt)_and_ vcpu */ +#define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt) +/* store status for s390 */ +#define KVM_S390_STORE_STATUS_NOADDR (-1ul) +#define KVM_S390_STORE_STATUS_PREFIXED (-2ul) +#define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long) +/* initial ipl psw for s390 */ +#define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw) +/* initial reset for s390 */ +#define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) +#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) +#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 928b0d59e9b..398978972b7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -15,6 +15,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/preempt.h> +#include <linux/marker.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -24,29 +25,18 @@ #include <asm/kvm_host.h> -#define KVM_MAX_VCPUS 4 -#define KVM_MEMORY_SLOTS 8 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 4 - -#define KVM_PIO_PAGE_OFFSET 1 - /* * vcpu->requests bit members */ #define KVM_REQ_TLB_FLUSH 0 #define KVM_REQ_MIGRATE_TIMER 1 #define KVM_REQ_REPORT_TPR_ACCESS 2 +#define KVM_REQ_MMU_RELOAD 3 +#define 
KVM_REQ_TRIPLE_FAULT 4 struct kvm_vcpu; extern struct kmem_cache *kvm_vcpu_cache; -struct kvm_guest_debug { - int enabled; - unsigned long bp[4]; - int singlestep; -}; - /* * It would be nice to use something smarter than a linear search, TBD... * Thankfully we dont expect many devices to register (famous last words :), @@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_vcpu { struct kvm *kvm; +#ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier preempt_notifier; +#endif int vcpu_id; struct mutex mutex; int cpu; @@ -100,6 +92,10 @@ struct kvm_memory_slot { unsigned long flags; unsigned long *rmap; unsigned long *dirty_bitmap; + struct { + unsigned long rmap_pde; + int write_count; + } *lpage_info; unsigned long userspace_addr; int user_alloc; }; @@ -114,11 +110,11 @@ struct kvm { KVM_PRIVATE_MEM_SLOTS]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; struct list_head vm_list; - struct file *filp; struct kvm_io_bus mmio_bus; struct kvm_io_bus pio_bus; struct kvm_vm_stat stat; struct kvm_arch arch; + atomic_t users_count; }; /* The guest did something we don't support. */ @@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size, struct module *module); void kvm_exit(void); +void kvm_get_kvm(struct kvm *kvm); +void kvm_put_kvm(struct kvm *kvm); + #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); extern struct page *bad_page; +extern pfn_t bad_pfn; int is_error_page(struct page *page); +int is_error_pfn(pfn_t pfn); int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, @@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm, int user_alloc); gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); +unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); +void kvm_set_page_dirty(struct page *page); +void kvm_set_page_accessed(struct page *page); + +pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); +void kvm_release_pfn_dirty(pfn_t); +void kvm_release_pfn_clean(pfn_t pfn); +void kvm_set_pfn_dirty(pfn_t pfn); +void kvm_set_pfn_accessed(pfn_t pfn); +void kvm_get_pfn(pfn_t pfn); + int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, @@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu); void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); +void kvm_reload_remote_mmus(struct kvm *kvm); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); @@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm); int kvm_cpu_get_interrupt(struct 
kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *v); +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); static inline void kvm_guest_enter(void) @@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item { struct dentry *dentry; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; +extern struct dentry *kvm_debugfs_dir; + +#ifdef CONFIG_KVM_TRACE +int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg); +void kvm_trace_cleanup(void); +#else +static inline +int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} +#define kvm_trace_cleanup() ((void)0) +#endif #endif diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 5497aac0d2f..3ddce03766c 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -11,8 +11,11 @@ /* Return values for hypercalls */ #define KVM_ENOSYS 1000 +#define KVM_EFAULT EFAULT +#define KVM_E2BIG E2BIG -#define KVM_HC_VAPIC_POLL_IRQ 1 +#define KVM_HC_VAPIC_POLL_IRQ 1 +#define KVM_HC_MMU_OP 2 /* * hypercalls use architecture specific @@ -20,6 +23,12 @@ #include <asm/kvm_para.h> #ifdef __KERNEL__ +#ifdef CONFIG_KVM_GUEST +void __init kvm_guest_init(void); +#else +#define kvm_guest_init() do { } while (0) +#endif + static inline int kvm_para_has_feature(unsigned int feature) { if (kvm_arch_para_features() & (1UL << feature)) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 1c4e46decb2..9b6f395c962 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -38,6 +38,8 @@ typedef unsigned long hva_t; typedef u64 hpa_t; typedef unsigned long hfn_t; +typedef hfn_t pfn_t; + struct kvm_pio_request { unsigned long count; int cur_count; diff --git a/include/linux/list.h b/include/linux/list.h index dac16f99c70..7627508f1b7 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -319,7 +319,16 @@ static inline int list_empty_careful(const struct list_head *head) return (next == head) && (next == head->prev); } -static inline void __list_splice(struct list_head *list, +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. + */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_splice(const struct list_head *list, struct list_head *head) { struct list_head *first = list->next; @@ -338,7 +347,8 @@ static inline void __list_splice(struct list_head *list, * @list: the new list to add. * @head: the place to add it in the first list. 
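list_is_singular() in the list.h hunk above tests for exactly one entry without walking or counting the list; the constified list_splice() described by the kernel-doc continues below. A small hypothetical use, with my_item and single_payload() invented purely for the sketch:

	#include <linux/kernel.h>
	#include <linux/list.h>

	struct my_item {			/* hypothetical element type */
		struct list_head link;
		int payload;
	};

	/* Return the payload if the list holds exactly one item, else -1. */
	static int single_payload(struct list_head *q)
	{
		if (!list_is_singular(q))
			return -1;
		return list_entry(q->next, struct my_item, link)->payload;
	}
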
*/ -static inline void list_splice(struct list_head *list, struct list_head *head) +static inline void list_splice(const struct list_head *list, + struct list_head *head) { if (!list_empty(list)) __list_splice(list, head); diff --git a/include/linux/lmb.h b/include/linux/lmb.h index 271153d27fb..c46c89505da 100644 --- a/include/linux/lmb.h +++ b/include/linux/lmb.h @@ -40,7 +40,8 @@ extern struct lmb lmb; extern void __init lmb_init(void); extern void __init lmb_analyze(void); -extern long __init lmb_add(u64 base, u64 size); +extern long lmb_add(u64 base, u64 size); +extern long lmb_remove(u64 base, u64 size); extern long __init lmb_reserve(u64 base, u64 size); extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid, u64 (*nid_range)(u64, u64, int *)); @@ -53,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void); extern u64 __init lmb_end_of_DRAM(void); extern void __init lmb_enforce_memory_limit(u64 memory_limit); extern int __init lmb_is_reserved(u64 addr); +extern int lmb_find(struct lmb_property *res); extern void lmb_dump_all(void); diff --git a/include/linux/mca-legacy.h b/include/linux/mca-legacy.h index f2bb770e530..7a3aea84590 100644 --- a/include/linux/mca-legacy.h +++ b/include/linux/mca-legacy.h @@ -34,7 +34,6 @@ extern int mca_find_adapter(int id, int start); extern int mca_find_unused_adapter(int id, int start); -extern int mca_is_adapter_used(int slot); extern int mca_mark_as_used(int slot); extern void mca_mark_as_unused(int slot); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 8b1c4295848..e6608776bc9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -27,9 +27,6 @@ struct mm_struct; #ifdef CONFIG_CGROUP_MEM_RES_CTLR -extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p); -extern void mm_free_cgroup(struct mm_struct *mm); - #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0) extern struct page_cgroup *page_get_page_cgroup(struct page *page); @@ -48,8 +45,10 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); +extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); + #define mm_match_cgroup(mm, cgroup) \ - ((cgroup) == rcu_dereference((mm)->mem_cgroup)) + ((cgroup) == mem_cgroup_from_task((mm)->owner)) extern int mem_cgroup_prepare_migration(struct page *page); extern void mem_cgroup_end_migration(struct page *page); @@ -73,15 +72,6 @@ extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, struct zone *zone, int priority); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ -static inline void mm_init_cgroup(struct mm_struct *mm, - struct task_struct *p) -{ -} - -static inline void mm_free_cgroup(struct mm_struct *mm) -{ -} - static inline void page_reset_bad_cgroup(struct page *page) { } diff --git a/include/linux/memory.h b/include/linux/memory.h index f80e0e331cb..2f5f8a5ef2a 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -53,6 +53,13 @@ struct memory_notify { struct notifier_block; struct mem_section; +/* + * Priorities for the hotplug memory callback routines (stored in decreasing + * order in the callback chain) + */ +#define SLAB_CALLBACK_PRI 1 +#define IPC_CALLBACK_PRI 10 + #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE static inline int memory_dev_init(void) { diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 8fee7a45736..73e358612ea 100644 --- 
a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -8,8 +8,18 @@ struct page; struct zone; struct pglist_data; +struct mem_section; #ifdef CONFIG_MEMORY_HOTPLUG + +/* + * Magic number for free bootmem. + * The normal smallest mapcount is -1. Here is smaller value than it. + */ +#define SECTION_INFO 0xfffffffe +#define MIX_INFO 0xfffffffd +#define NODE_INFO 0xfffffffc + /* * pgdat resizing functions */ @@ -64,9 +74,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long); /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); +extern int __remove_pages(struct zone *zone, unsigned long start_pfn, + unsigned long nr_pages); /* - * Walk thorugh all memory which is registered as resource. + * Walk through all memory which is registered as resource. * arg is (start_pfn, nr_pages, private_arg_pointer) */ extern int walk_memory_resource(unsigned long start_pfn, @@ -142,6 +154,18 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) #endif /* CONFIG_NUMA */ #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ +#ifdef CONFIG_SPARSEMEM_VMEMMAP +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} +static inline void put_page_bootmem(struct page *page) +{ +} +#else +extern void register_page_bootmem_info_node(struct pglist_data *pgdat); +extern void put_page_bootmem(struct page *page); +#endif + #else /* ! CONFIG_MEMORY_HOTPLUG */ /* * Stub functions for when hotplug is off @@ -169,6 +193,10 @@ static inline int mhp_notimplemented(const char *func) return -ENOSYS; } +static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) +{ +} + #endif /* ! CONFIG_MEMORY_HOTPLUG */ extern int add_memory(int nid, u64 start, u64 size); @@ -176,5 +204,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, int nr_pages); +extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); +extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, + unsigned long pnum); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 59c4865bc85..3a39570b81b 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -8,15 +8,32 @@ * Copyright 2003,2004 Andi Kleen SuSE Labs */ +/* + * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are + * passed by the user to either set_mempolicy() or mbind() in an 'int' actual. + * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags. + */ + /* Policies */ -#define MPOL_DEFAULT 0 -#define MPOL_PREFERRED 1 -#define MPOL_BIND 2 -#define MPOL_INTERLEAVE 3 +enum { + MPOL_DEFAULT, + MPOL_PREFERRED, + MPOL_BIND, + MPOL_INTERLEAVE, + MPOL_MAX, /* always last member of enum */ +}; -#define MPOL_MAX MPOL_INTERLEAVE +/* Flags for set_mempolicy */ +#define MPOL_F_STATIC_NODES (1 << 15) +#define MPOL_F_RELATIVE_NODES (1 << 14) -/* Flags for get_mem_policy */ +/* + * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to + * either set_mempolicy() or mbind(). 
+ */ +#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES) + +/* Flags for get_mempolicy */ #define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */ #define MPOL_F_ADDR (1<<1) /* look up vma using address */ #define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */ @@ -27,6 +44,14 @@ #define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */ #define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */ +/* + * Internal flags that share the struct mempolicy flags word with + * "mode flags". These flags are allocated from bit 0 up, as they + * are never OR'ed into the mode in mempolicy API arguments. + */ +#define MPOL_F_SHARED (1 << 0) /* identify shared policies */ +#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */ + #ifdef __KERNEL__ #include <linux/mmzone.h> @@ -35,7 +60,6 @@ #include <linux/spinlock.h> #include <linux/nodemask.h> -struct vm_area_struct; struct mm_struct; #ifdef CONFIG_NUMA @@ -54,22 +78,27 @@ struct mm_struct; * mmap_sem. * * Freeing policy: - * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd. - * All other policies don't have any external state. mpol_free() handles this. + * Mempolicy objects are reference counted. A mempolicy will be freed when + * mpol_put() decrements the reference count to zero. * - * Copying policy objects: - * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this. + * Duplicating policy objects: + * mpol_dup() allocates a new mempolicy and copies the specified mempolicy + * to the new storage. The reference count of the new object is initialized + * to 1, representing the caller of mpol_dup(). */ struct mempolicy { atomic_t refcnt; - short policy; /* See MPOL_* above */ + unsigned short mode; /* See MPOL_* above */ + unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ union { - struct zonelist *zonelist; /* bind */ short preferred_node; /* preferred */ - nodemask_t nodes; /* interleave */ + nodemask_t nodes; /* interleave/bind */ /* undefined for default */ } v; - nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */ + union { + nodemask_t cpuset_mems_allowed; /* relative to these nodes */ + nodemask_t user_nodemask; /* nodemask passed by user */ + } w; }; /* @@ -77,18 +106,43 @@ struct mempolicy { * The default fast path of a NULL MPOL_DEFAULT policy is always inlined. */ -extern void __mpol_free(struct mempolicy *pol); -static inline void mpol_free(struct mempolicy *pol) +extern void __mpol_put(struct mempolicy *pol); +static inline void mpol_put(struct mempolicy *pol) { if (pol) - __mpol_free(pol); + __mpol_put(pol); } -extern struct mempolicy *__mpol_copy(struct mempolicy *pol); -static inline struct mempolicy *mpol_copy(struct mempolicy *pol) +/* + * Does mempolicy pol need explicit unref after use? + * Currently only needed for shared policies. 
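The mempolicy.h hunk in progress here renames mpol_free()/mpol_copy() to mpol_put()/mpol_dup() and adds conditional-reference helpers (mpol_needs_cond_ref() and mpol_cond_put() follow right below). The dup-then-put pattern might look like the sketch below; it assumes CONFIG_NUMA, assumes __mpol_dup() keeps the ERR_PTR(-ENOMEM) convention of the old __mpol_copy(), and copy_vma_policy() itself is hypothetical.

	#include <linux/err.h>
	#include <linux/mempolicy.h>
	#include <linux/mm.h>

	/* Hypothetical helper: give dst its own copy of src's vma policy. */
	static int copy_vma_policy(struct vm_area_struct *dst,
				   struct vm_area_struct *src)
	{
		struct mempolicy *pol = mpol_dup(vma_policy(src));

		if (IS_ERR(pol))
			return PTR_ERR(pol);
		mpol_put(vma_policy(dst));	/* drop whatever dst held before */
		dst->vm_policy = pol;		/* dst now owns the new reference */
		return 0;
	}
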
+ */ +static inline int mpol_needs_cond_ref(struct mempolicy *pol) +{ + return (pol && (pol->flags & MPOL_F_SHARED)); +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ + if (mpol_needs_cond_ref(pol)) + __mpol_put(pol); +} + +extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, + struct mempolicy *frompol); +static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, + struct mempolicy *frompol) +{ + if (!frompol) + return frompol; + return __mpol_cond_copy(tompol, frompol); +} + +extern struct mempolicy *__mpol_dup(struct mempolicy *pol); +static inline struct mempolicy *mpol_dup(struct mempolicy *pol) { if (pol) - pol = __mpol_copy(pol); + pol = __mpol_dup(pol); return pol; } @@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b) return 1; return __mpol_equal(a, b); } -#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b)) - -/* Could later add inheritance of the process policy here. */ - -#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL) /* * Tree of shared policies for a shared memory region. @@ -133,8 +182,7 @@ struct shared_policy { spinlock_t lock; }; -void mpol_shared_policy_init(struct shared_policy *info, int policy, - nodemask_t *nodes); +void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *new); @@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk, extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); extern void mpol_fix_fork_child_flag(struct task_struct *p); -extern struct mempolicy default_policy; extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, - unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol); + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask); extern unsigned slab_node(struct mempolicy *policy); extern enum zone_type policy_zone; @@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k) int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags); + +#ifdef CONFIG_TMPFS +extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context); + +extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, + int no_context); +#endif #else struct mempolicy {}; @@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b) { return 1; } -#define vma_mpol_equal(a,b) 1 -#define mpol_set_vma_default(vma) do {} while(0) +static inline void mpol_put(struct mempolicy *p) +{ +} + +static inline void mpol_cond_put(struct mempolicy *pol) +{ +} -static inline void mpol_free(struct mempolicy *p) +static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, + struct mempolicy *from) { + return from; } static inline void mpol_get(struct mempolicy *pol) { } -static inline struct mempolicy *mpol_copy(struct mempolicy *old) +static inline struct mempolicy *mpol_dup(struct mempolicy *old) { return NULL; } @@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info, return -EINVAL; } -static inline void mpol_shared_policy_init(struct shared_policy *info, - int policy, nodemask_t *nodes) +static inline void mpol_shared_policy_init(struct shared_policy *sp, + struct mempolicy *mpol) { } @@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p) } static inline struct zonelist 
*huge_zonelist(struct vm_area_struct *vma, - unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol) + unsigned long addr, gfp_t gfp_flags, + struct mempolicy **mpol, nodemask_t **nodemask) { - return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags); + *mpol = NULL; + *nodemask = NULL; + return node_zonelist(0, gfp_flags); } static inline int do_migrate_pages(struct mm_struct *mm, @@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm, static inline void check_highest_zone(int k) { } + +#ifdef CONFIG_TMPFS +static inline int mpol_parse_str(char *str, struct mempolicy **mpol, + int no_context) +{ + return 1; /* error */ +} + +static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, + int no_context) +{ + return 0; +} +#endif + #endif /* CONFIG_NUMA */ #endif /* __KERNEL__ */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ff7df1a2222..9fa1a8002ce 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -208,6 +208,38 @@ struct mlx4_mtt { int page_shift; }; +enum { + MLX4_DB_PER_PAGE = PAGE_SIZE / 4 +}; + +struct mlx4_db_pgdir { + struct list_head list; + DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); + DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); + unsigned long *bits[2]; + __be32 *db_page; + dma_addr_t db_dma; +}; + +struct mlx4_ib_user_db_page; + +struct mlx4_db { + __be32 *db; + union { + struct mlx4_db_pgdir *pgdir; + struct mlx4_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; + int order; +}; + +struct mlx4_hwq_resources { + struct mlx4_db db; + struct mlx4_mtt mtt; + struct mlx4_buf buf; +}; + struct mlx4_mr { struct mlx4_mtt mtt; u64 iova; @@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf); +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); + +int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, + int size, int max_direct); +void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, + int size); + int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index a5e43febee4..7f128b266fa 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_qp_context *context); +int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, + struct mlx4_qp_context *context, + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); + static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) { return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); diff --git a/include/linux/mm.h b/include/linux/mm.h index 286d3152160..c31a9cd2a30 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ +#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define 
VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS @@ -164,8 +165,6 @@ struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - struct page *(*nopage)(struct vm_area_struct *area, - unsigned long address, int *type); unsigned long (*nopfn)(struct vm_area_struct *area, unsigned long address); @@ -173,7 +172,25 @@ struct vm_operations_struct { * writable, if an error is returned it will cause a SIGBUS */ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); #ifdef CONFIG_NUMA + /* + * set_policy() op must add a reference to any non-NULL @new mempolicy + * to hold the policy upon return. Caller should pass NULL @new to + * remove a policy and fall back to surrounding context--i.e. do not + * install a MPOL_DEFAULT policy, nor the task or system default + * mempolicy. + */ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); + + /* + * get_policy() op must add reference [mpol_get()] to any policy at + * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure + * in mm/mempolicy.c will do this automatically. + * get_policy() must NOT add a ref if the policy at (vma,addr) is not + * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * If no [shared/vma] mempolicy exists at the addr, get_policy() op + * must return NULL--i.e., do not "fallback" to task or system default + * policy. + */ struct mempolicy *(*get_policy)(struct vm_area_struct *vma, unsigned long addr); int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, @@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order) * we have run out of space and have to fall back to an * alternate (slower) way of determining the node. * - * No sparsemem: | NODE | ZONE | ... | FLAGS | - * with space for node: | SECTION | NODE | ZONE | ... | FLAGS | - * no space for node: | SECTION | ZONE | ... | FLAGS | + * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS | + * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS | + * classic sparse no space for node: | SECTION | ZONE | ... 
| FLAGS | */ -#ifdef CONFIG_SPARSEMEM +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTIONS_WIDTH SECTIONS_SHIFT #else #define SECTIONS_WIDTH 0 @@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order) #define ZONES_WIDTH ZONES_SHIFT -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED +#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS #define NODES_WIDTH NODES_SHIFT #else +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#error "Vmemmap: No space for nodes field in page flags" +#endif #define NODES_WIDTH 0 #endif @@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order) #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED -#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS +#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS #endif #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) @@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page) return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; } +#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) static inline unsigned long page_to_section(struct page *page) { return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; } +#endif static inline void set_page_zone(struct page *page, enum zone_type zone) { @@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page) struct address_space *mapping = page->mapping; VM_BUG_ON(PageSlab(page)); +#ifdef CONFIG_SWAP if (unlikely(PageSwapCache(page))) mapping = &swapper_space; - else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) + else +#endif + if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) mapping = NULL; return mapping; } @@ -649,12 +674,6 @@ static inline int page_mapped(struct page *page) } /* - * Error return values for the *_nopage functions - */ -#define NOPAGE_SIGBUS (NULL) -#define NOPAGE_OOM ((struct page *) (-1)) - -/* * Error return values for the *_nopfn functions */ #define NOPFN_SIGBUS ((unsigned long) -1) @@ -720,7 +739,9 @@ struct zap_details { unsigned long truncate_count; /* Compare vm_truncate_count */ }; -struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t); +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); + unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, @@ -1045,6 +1066,19 @@ extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); + +#ifdef CONFIG_PROC_FS +/* From fs/proc/base.c. 
callers must _not_ hold the mm's exe_file_lock */ +extern void added_exe_file_vma(struct mm_struct *mm); +extern void removed_exe_file_vma(struct mm_struct *mm); +#else +static inline void added_exe_file_vma(struct mm_struct *mm) +{} + +static inline void removed_exe_file_vma(struct mm_struct *mm) +{} +#endif /* CONFIG_PROC_FS */ + extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, @@ -1149,6 +1183,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); +int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, + unsigned long pfn); struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); @@ -1207,8 +1243,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages); -void drop_pagecache(void); -void drop_slab(void); #ifndef CONFIG_MMU #define randomize_va_space 0 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index af190ceab97..eb7c16cc955 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -42,7 +42,10 @@ struct page { * to show when page is mapped * & limit reverse map searches. */ - unsigned int inuse; /* SLUB: Nr of objects */ + struct { /* SLUB */ + u16 inuse; + u16 objects; + }; }; union { struct { @@ -172,6 +175,7 @@ struct mm_struct { atomic_t mm_users; /* How many users with user space? */ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ int map_count; /* number of VMAs */ + int core_waiters; struct rw_semaphore mmap_sem; spinlock_t page_table_lock; /* Protects page tables and some counters */ @@ -216,14 +220,20 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access the bits */ /* coredumping support */ - int core_waiters; struct completion *core_startup_done, core_done; /* aio bits */ - rwlock_t ioctx_list_lock; + rwlock_t ioctx_list_lock; /* aio lock */ struct kioctx *ioctx_list; -#ifdef CONFIG_CGROUP_MEM_RES_CTLR - struct mem_cgroup *mem_cgroup; +#ifdef CONFIG_MM_OWNER + struct task_struct *owner; /* The thread group leader that */ + /* owns the mm_struct. 
*/ +#endif + +#ifdef CONFIG_PROC_FS + /* store ref to file /proc/<pid>/exe symlink points to */ + struct file *exe_file; + unsigned long num_exe_file_vmas; #endif }; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9f274a687c7..aad98003176 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -3,6 +3,7 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ +#ifndef __GENERATING_BOUNDS_H #include <linux/spinlock.h> #include <linux/list.h> @@ -15,6 +16,7 @@ #include <linux/seqlock.h> #include <linux/nodemask.h> #include <linux/pageblock-flags.h> +#include <linux/bounds.h> #include <asm/atomic.h> #include <asm/page.h> @@ -129,6 +131,8 @@ struct per_cpu_pageset { #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)]) #endif +#endif /* !__GENERATING_BOUNDS.H */ + enum zone_type { #ifdef CONFIG_ZONE_DMA /* @@ -177,9 +181,11 @@ enum zone_type { ZONE_HIGHMEM, #endif ZONE_MOVABLE, - MAX_NR_ZONES + __MAX_NR_ZONES }; +#ifndef __GENERATING_BOUNDS_H + /* * When a memory allocation must conform to specific limitations (such * as being suitable for DMA) the caller will pass in hints to the @@ -188,28 +194,15 @@ enum zone_type { * match the requested limits. See gfp_zone() in include/linux/gfp.h */ -/* - * Count the active zones. Note that the use of defined(X) outside - * #if and family is not necessarily defined so ensure we cannot use - * it later. Use __ZONE_COUNT to work out how many shift bits we need. - */ -#define __ZONE_COUNT ( \ - defined(CONFIG_ZONE_DMA) \ - + defined(CONFIG_ZONE_DMA32) \ - + 1 \ - + defined(CONFIG_HIGHMEM) \ - + 1 \ -) -#if __ZONE_COUNT < 2 +#if MAX_NR_ZONES < 2 #define ZONES_SHIFT 0 -#elif __ZONE_COUNT <= 2 +#elif MAX_NR_ZONES <= 2 #define ZONES_SHIFT 1 -#elif __ZONE_COUNT <= 4 +#elif MAX_NR_ZONES <= 4 #define ZONES_SHIFT 2 #else #error ZONES_SHIFT -- too many zones configured adjust calculation #endif -#undef __ZONE_COUNT struct zone { /* Fields commonly accessed by the page allocator */ @@ -393,10 +386,10 @@ static inline int zone_is_oom_locked(const struct zone *zone) * The NUMA zonelists are doubled becausse we need zonelists that restrict the * allocations to a single node for GFP_THISNODE. * - * [0 .. MAX_NR_ZONES -1] : Zonelists with fallback - * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1] : No fallback (GFP_THISNODE) + * [0] : Zonelist with fallback + * [1] : No fallback (GFP_THISNODE) */ -#define MAX_ZONELISTS (2 * MAX_NR_ZONES) +#define MAX_ZONELISTS 2 /* @@ -464,11 +457,20 @@ struct zonelist_cache { unsigned long last_full_zap; /* when last zap'd (jiffies) */ }; #else -#define MAX_ZONELISTS MAX_NR_ZONES +#define MAX_ZONELISTS 1 struct zonelist_cache; #endif /* + * This struct contains information about a zone in a zonelist. It is stored + * here to avoid dereferences into large structures and lookups of tables + */ +struct zoneref { + struct zone *zone; /* Pointer to actual zone */ + int zone_idx; /* zone_idx(zoneref->zone) */ +}; + +/* * One allocation request operates on a zonelist. A zonelist * is a list of zones, the first one is the 'goal' of the * allocation, the other zones are fallback zones, in decreasing @@ -476,34 +478,23 @@ struct zonelist_cache; * * If zlcache_ptr is not NULL, then it is just the address of zlcache, * as explained above. If zlcache_ptr is NULL, there is no zlcache. + * * + * To speed the reading of the zonelist, the zonerefs contain the zone index + * of the entry being read. 
Helper functions to access information given + * a struct zoneref are + * + * zonelist_zone() - Return the struct zone * for an entry in _zonerefs + * zonelist_zone_idx() - Return the index of the zone for an entry + * zonelist_node_idx() - Return the index of the node for an entry */ - struct zonelist { struct zonelist_cache *zlcache_ptr; // NULL or &zlcache - struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited + struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; #ifdef CONFIG_NUMA struct zonelist_cache zlcache; // optional ... #endif }; -#ifdef CONFIG_NUMA -/* - * Only custom zonelists like MPOL_BIND need to be filtered as part of - * policies. As described in the comment for struct zonelist_cache, these - * zonelists will not have a zlcache so zlcache_ptr will not be set. Use - * that to determine if the zonelists needs to be filtered or not. - */ -static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) -{ - return !zonelist->zlcache_ptr; -} -#else -static inline int alloc_should_filter_zonelist(struct zonelist *zonelist) -{ - return 0; -} -#endif /* CONFIG_NUMA */ - #ifdef CONFIG_ARCH_POPULATES_NODE_MAP struct node_active_region { unsigned long start_pfn; @@ -637,9 +628,10 @@ static inline int is_normal_idx(enum zone_type idx) static inline int is_highmem(struct zone *zone) { #ifdef CONFIG_HIGHMEM - int zone_idx = zone - zone->zone_pgdat->node_zones; - return zone_idx == ZONE_HIGHMEM || - (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem()); + int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; + return zone_off == ZONE_HIGHMEM * sizeof(*zone) || + (zone_off == ZONE_MOVABLE * sizeof(*zone) && + zone_movable_is_highmem()); #else return 0; #endif @@ -730,32 +722,103 @@ extern struct zone *next_zone(struct zone *zone); zone; \ zone = next_zone(zone)) -#ifdef CONFIG_SPARSEMEM -#include <asm/sparsemem.h> -#endif +static inline struct zone *zonelist_zone(struct zoneref *zoneref) +{ + return zoneref->zone; +} -#if BITS_PER_LONG == 32 -/* - * with 32 bit page->flags field, we reserve 9 bits for node/zone info. - * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes. +static inline int zonelist_zone_idx(struct zoneref *zoneref) +{ + return zoneref->zone_idx; +} + +static inline int zonelist_node_idx(struct zoneref *zoneref) +{ +#ifdef CONFIG_NUMA + /* zone_to_nid not available in this context */ + return zoneref->zone->node; +#else + return 0; +#endif /* CONFIG_NUMA */ +} + +/** + * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point + * @z - The cursor used as a starting point for the search + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @zone - The first suitable zone found is returned via this parameter + * + * This function returns the next zone at or below a given zone index that is + * within the allowed nodemask using a cursor as the starting point for the + * search. The zoneref returned is a cursor that is used as the next starting + * point for future calls to next_zones_zonelist(). */ -#define FLAGS_RESERVED 9 +struct zoneref *next_zones_zonelist(struct zoneref *z, + enum zone_type highest_zoneidx, + nodemask_t *nodes, + struct zone **zone); -#elif BITS_PER_LONG == 64 -/* - * with 64 bit flags field, there's plenty of room. 
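/*
 * Standalone userspace model (not the kernel code) of the zoneref cursor
 * walk that first_zones_zonelist()/next_zones_zonelist() and the
 * for_each_zone_zonelist helpers below build on: the filter reads only
 * the cached zone_idx, never struct zone itself, and the returned cursor
 * is fed back in to resume the scan. The advancing details are
 * deliberately simplified here.
 */
#include <stdio.h>

struct zone { const char *name; };

struct zoneref {
	struct zone *zone;	/* NULL-delimited, highest zone first */
	int zone_idx;		/* cached zone_idx(zone) */
};

static struct zoneref *next_zone_model(struct zoneref *z, int highest_zoneidx,
				       struct zone **zone)
{
	/* skip entries above the allowed index using only the cached index */
	while (z->zone && z->zone_idx > highest_zoneidx)
		z++;
	*zone = z->zone;
	return z;
}

int main(void)
{
	struct zone normal = { "Normal" }, dma = { "DMA" };
	struct zoneref zonerefs[] = {
		{ &normal, 2 }, { &dma, 0 }, { NULL, 0 },
	};
	struct zoneref *z;
	struct zone *zone;

	/* e.g. a request limited to zone index 0 (a GFP_DMA-style walk) */
	for (z = next_zone_model(zonerefs, 0, &zone); zone;
	     z = next_zone_model(z + 1, 0, &zone))
		printf("candidate zone: %s\n", zone->name);
	return 0;
}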
+/** + * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist + * @zonelist - The zonelist to search for a suitable zone + * @highest_zoneidx - The zone index of the highest zone to return + * @nodes - An optional nodemask to filter the zonelist with + * @zone - The first suitable zone found is returned via this parameter + * + * This function returns the first zone at or below a given zone index that is + * within the allowed nodemask. The zoneref returned is a cursor that can be + * used to iterate the zonelist with next_zones_zonelist. The cursor should + * not be used by the caller as it does not match the value of the zone + * returned. */ -#define FLAGS_RESERVED 32 +static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, + enum zone_type highest_zoneidx, + nodemask_t *nodes, + struct zone **zone) +{ + return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, + zone); +} -#else +/** + * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * @nodemask - Nodemask allowed by the allocator + * + * This iterator iterates though all zones at or below a given zone index and + * within a given nodemask + */ +#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ + for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \ + zone; \ + z = next_zones_zonelist(z, highidx, nodemask, &zone)) \ -#error BITS_PER_LONG not defined +/** + * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index + * @zone - The current zone in the iterator + * @z - The current pointer within zonelist->zones being iterated + * @zlist - The zonelist being iterated + * @highidx - The zone index of the highest zone to return + * + * This iterator iterates though all zones at or below a given zone index. + */ +#define for_each_zone_zonelist(zone, z, zlist, highidx) \ + for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) +#ifdef CONFIG_SPARSEMEM +#include <asm/sparsemem.h> #endif #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ !defined(CONFIG_ARCH_POPULATES_NODE_MAP) -#define early_pfn_to_nid(nid) (0UL) +static inline unsigned long early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} #endif #ifdef CONFIG_FLATMEM @@ -833,6 +896,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } extern int __section_nr(struct mem_section* ms); +extern unsigned long usemap_size(void); /* * We use the lower bits of the mem_map pointer to store @@ -938,6 +1002,7 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #define pfn_valid_within(pfn) (1) #endif +#endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index f950921523f..b03b2745741 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -58,7 +58,11 @@ #define MSDOS_DOTDOT ".. 
" /* "..", padded to MSDOS_NAME chars */ /* media of boot sector */ -#define FAT_VALID_MEDIA(x) ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0) +static inline int fat_valid_media(u8 media) +{ + return 0xf8 <= media || media == 0xf0; +} + #define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \ MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x)) @@ -195,6 +199,7 @@ struct fat_mount_options { char *iocharset; /* Charset used for filename input/display */ unsigned short shortname; /* flags for shortname display/create rule */ unsigned char name_check; /* r = relaxed, n = normal, s = strict */ + unsigned short allow_utime;/* permission for setting the [am]time */ unsigned quiet:1, /* set = fake successful chmods and chowns */ showexec:1, /* set = only set x bit for com/exe/bat */ sys_immutable:1, /* set = system files are immutable */ @@ -232,6 +237,7 @@ struct msdos_sb_info { struct mutex fat_lock; unsigned int prev_free; /* previously allocated cluster number */ unsigned int free_clusters; /* -1 if undefined */ + unsigned int free_clus_valid; /* is free_clusters valid? */ struct fat_mount_options options; struct nls_table *nls_disk; /* Codepage used on disk */ struct nls_table *nls_io; /* Charset used for input and display */ @@ -401,7 +407,7 @@ extern int fat_generic_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern const struct file_operations fat_file_operations; extern const struct inode_operations fat_file_inode_operations; -extern int fat_notify_change(struct dentry * dentry, struct iattr * attr); +extern int fat_setattr(struct dentry * dentry, struct iattr * attr); extern void fat_truncate(struct inode *inode); extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); diff --git a/include/linux/msg.h b/include/linux/msg.h index 10a3d5a1abf..6f3b8e79a99 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h @@ -49,16 +49,26 @@ struct msginfo { unsigned short msgseg; }; +/* + * Scaling factor to compute msgmni: + * the memory dedicated to msg queues (msgmni * msgmnb) should occupy + * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c): + * up to 8MB : msgmni = 16 (MSGMNI) + * 4 GB : msgmni = 8K + * more than 16 GB : msgmni = 32K (IPCMNI) + */ +#define MSG_MEM_SCALE 32 + #define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ #define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ /* unused */ -#define MSGPOOL (MSGMNI*MSGMNB/1024) /* size in kilobytes of message pool */ +#define MSGPOOL (MSGMNI * MSGMNB) /* size in bytes of message pool */ #define MSGTQL MSGMNB /* number of system message headers */ #define MSGMAP MSGMNB /* number of entries in message map */ #define MSGSSZ 16 /* message segment size */ -#define __MSGSEG ((MSGPOOL*1024)/ MSGSSZ) /* max no. of segments */ +#define __MSGSEG (MSGPOOL / MSGSSZ) /* max no. of segments */ #define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff) #ifdef __KERNEL__ diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 986572081e1..155719dab81 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h @@ -56,9 +56,11 @@ struct nbd_device { int magic; spinlock_t queue_lock; - struct list_head queue_head;/* Requests are added here... 
*/ + struct list_head queue_head; /* Requests waiting result */ struct request *active_req; wait_queue_head_t active_wq; + struct list_head waiting_queue; /* Requests to be sent */ + wait_queue_head_t waiting_wq; struct mutex tx_lock; struct gendisk *disk; @@ -86,11 +88,7 @@ struct nbd_request { char handle[8]; __be64 from; __be32 len; -} -#ifdef __GNUC__ - __attribute__ ((packed)) -#endif -; +} __attribute__ ((packed)); /* * This is the reply packet that nbd-server sends back to the client after diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index 88766e43e12..9f2d76347f1 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -204,6 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *); /* linux/fs/ncpfs/dir.c */ extern const struct inode_operations ncp_dir_inode_operations; extern const struct file_operations ncp_dir_operations; +extern struct dentry_operations ncp_root_dentry_operations; int ncp_conn_logged_in(struct super_block *); int ncp_date_dos2unix(__le16 time, __le16 date); void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); @@ -223,6 +224,12 @@ int ncp_disconnect(struct ncp_server *server); void ncp_lock_server(struct ncp_server *server); void ncp_unlock_server(struct ncp_server *server); +/* linux/fs/ncpfs/symlink.c */ +#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) +extern const struct address_space_operations ncp_symlink_aops; +int ncp_symlink(struct inode*, struct dentry*, const char*); +#endif + /* linux/fs/ncpfs/file.c */ extern const struct inode_operations ncp_file_inode_operations; extern const struct file_operations ncp_file_operations; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 905e18f4b41..848025cd708 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -14,6 +14,8 @@ * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c. * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c. + * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c. + * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c. 
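/*
 * Small standalone illustration of the nodes_fold() semantics documented
 * above ("dst bits = orig bits mod sz"): every node id set in orig maps
 * onto id % sz in dst. This models the behaviour with a plain bitmask;
 * the kernel operates on nodemask_t via bitmap_fold().
 */
#include <stdio.h>

static unsigned long fold_model(unsigned long orig, int sz)
{
	unsigned long dst = 0;
	int bit;

	for (bit = 0; bit < 8 * (int)sizeof(orig); bit++)
		if (orig & (1UL << bit))
			dst |= 1UL << (bit % sz);
	return dst;
}

int main(void)
{
	/* nodes {1, 9} folded modulo 8 both land on node 1 */
	printf("0x%lx\n", fold_model((1UL << 1) | (1UL << 9), 8));	/* 0x2 */
	return 0;
}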
* * The available nodemask operations are: * @@ -55,7 +57,9 @@ * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing * int nodelist_parse(buf, map) Parse ascii string as nodelist * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) - * int nodes_remap(dst, src, old, new) *dst = map(old, new)(dst) + * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) + * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap + * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz * * for_each_node_mask(node, mask) for-loop node over mask * @@ -326,6 +330,22 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } +#define nodes_onto(dst, orig, relmap) \ + __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) +static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, + const nodemask_t *relmapp, int nbits) +{ + bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); +} + +#define nodes_fold(dst, orig, sz) \ + __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) +static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, + int sz, int nbits) +{ + bitmap_fold(dstp->bits, origp->bits, sz, nbits); +} + #if MAX_NUMNODES > 1 #define for_each_node_mask(node, mask) \ for ((node) = first_node(mask); \ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index f4df40038f0..0ff6224d172 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -121,6 +121,10 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh, extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, struct notifier_block *nb); +extern int blocking_notifier_chain_cond_register( + struct blocking_notifier_head *nh, + struct notifier_block *nb); + extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, @@ -247,6 +251,7 @@ extern struct blocking_notifier_head reboot_notifier_list; #define VT_DEALLOCATE 0x0002 /* Console will be deallocated */ #define VT_WRITE 0x0003 /* A char got output */ #define VT_UPDATE 0x0004 /* A bigger update occurred */ +#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */ #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index 3852436b652..a7979baf1e3 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -23,8 +23,8 @@ enum oom_constraint { CONSTRAINT_MEMORY_POLICY, }; -extern int try_set_zone_oom(struct zonelist *zonelist); -extern void clear_zonelist_oom(struct zonelist *zonelist); +extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); +extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); extern int register_oom_notifier(struct notifier_block *nb); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index b5b30f1c1e5..590cff32415 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -6,7 +6,10 @@ #define PAGE_FLAGS_H #include <linux/types.h> +#ifndef __GENERATING_BOUNDS_H #include <linux/mm_types.h> +#include <linux/bounds.h> +#endif /* !__GENERATING_BOUNDS_H */ /* * Various page->flags bits: @@ -59,77 +62,138 @@ * extends from the high bits downwards. * * | FIELD | ... 
| FLAGS | - * N-1 ^ 0 - * (N-FLAGS_RESERVED) + * N-1 ^ 0 + * (NR_PAGEFLAGS) * - * The fields area is reserved for fields mapping zone, node and SPARSEMEM - * section. The boundry between these two areas is defined by - * FLAGS_RESERVED which defines the width of the fields section - * (see linux/mmzone.h). New flags must _not_ overlap with this area. + * The fields area is reserved for fields mapping zone, node (for NUMA) and + * SPARSEMEM section (for variants of SPARSEMEM that require section ids like + * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). */ -#define PG_locked 0 /* Page is locked. Don't touch. */ -#define PG_error 1 -#define PG_referenced 2 -#define PG_uptodate 3 +enum pageflags { + PG_locked, /* Page is locked. Don't touch. */ + PG_error, + PG_referenced, + PG_uptodate, + PG_dirty, + PG_lru, + PG_active, + PG_slab, + PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ + PG_arch_1, + PG_reserved, + PG_private, /* If pagecache, has fs-private data */ + PG_writeback, /* Page is under writeback */ +#ifdef CONFIG_PAGEFLAGS_EXTENDED + PG_head, /* A head page */ + PG_tail, /* A tail page */ +#else + PG_compound, /* A compound page */ +#endif + PG_swapcache, /* Swap page: swp_entry_t in private */ + PG_mappedtodisk, /* Has blocks allocated on-disk */ + PG_reclaim, /* To be reclaimed asap */ + PG_buddy, /* Page is free, on buddy lists */ +#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR + PG_uncached, /* Page has been mapped as uncached */ +#endif + __NR_PAGEFLAGS +}; + +#ifndef __GENERATING_BOUNDS_H + +/* + * Macros to create function definitions for page flags + */ +#define TESTPAGEFLAG(uname, lname) \ +static inline int Page##uname(struct page *page) \ + { return test_bit(PG_##lname, &page->flags); } -#define PG_dirty 4 -#define PG_lru 5 -#define PG_active 6 -#define PG_slab 7 /* slab debug (Suparna wants this) */ +#define SETPAGEFLAG(uname, lname) \ +static inline void SetPage##uname(struct page *page) \ + { set_bit(PG_##lname, &page->flags); } -#define PG_owner_priv_1 8 /* Owner use. 
If pagecache, fs may use*/ -#define PG_arch_1 9 -#define PG_reserved 10 -#define PG_private 11 /* If pagecache, has fs-private data */ +#define CLEARPAGEFLAG(uname, lname) \ +static inline void ClearPage##uname(struct page *page) \ + { clear_bit(PG_##lname, &page->flags); } -#define PG_writeback 12 /* Page is under writeback */ -#define PG_compound 14 /* Part of a compound page */ -#define PG_swapcache 15 /* Swap page: swp_entry_t in private */ +#define __SETPAGEFLAG(uname, lname) \ +static inline void __SetPage##uname(struct page *page) \ + { __set_bit(PG_##lname, &page->flags); } -#define PG_mappedtodisk 16 /* Has blocks allocated on-disk */ -#define PG_reclaim 17 /* To be reclaimed asap */ -#define PG_buddy 19 /* Page is free, on buddy lists */ +#define __CLEARPAGEFLAG(uname, lname) \ +static inline void __ClearPage##uname(struct page *page) \ + { __clear_bit(PG_##lname, &page->flags); } + +#define TESTSETFLAG(uname, lname) \ +static inline int TestSetPage##uname(struct page *page) \ + { return test_and_set_bit(PG_##lname, &page->flags); } + +#define TESTCLEARFLAG(uname, lname) \ +static inline int TestClearPage##uname(struct page *page) \ + { return test_and_clear_bit(PG_##lname, &page->flags); } -/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ -#define PG_readahead PG_reclaim /* Reminder to do async read-ahead */ -/* PG_owner_priv_1 users should have descriptive aliases */ -#define PG_checked PG_owner_priv_1 /* Used by some filesystems */ -#define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */ +#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ + SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) + +#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \ + __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) + +#define PAGEFLAG_FALSE(uname) \ +static inline int Page##uname(struct page *page) \ + { return 0; } + +#define TESTSCFLAG(uname, lname) \ + TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) + +struct page; /* forward declaration */ + +PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +PAGEFLAG(Error, error) +PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) +PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) +PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) +PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) +__PAGEFLAG(Slab, slab) +PAGEFLAG(Checked, owner_priv_1) /* Used by some filesystems */ +PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */ +PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) +PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) + __SETPAGEFLAG(Private, private) -#if (BITS_PER_LONG > 32) /* - * 64-bit-only flags build down from bit 31 - * - * 32 bit -------------------------------| FIELDS | FLAGS | - * 64 bit | FIELDS | ?????? FLAGS | - * 63 32 0 + * Only test-and-set exist for PG_writeback. The unconditional operators are + * risky: they bypass page accounting. */ -#define PG_uncached 31 /* Page has been mapped as uncached */ -#endif +TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) +__PAGEFLAG(Buddy, buddy) +PAGEFLAG(MappedToDisk, mappedtodisk) +/* PG_readahead is only used for file reads; PG_reclaim is only for writes */ +PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) +PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ + +#ifdef CONFIG_HIGHMEM /* - * Manipulation of page state flags + * Must use a macro here due to header dependency issues. 
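/*
 * For reference, this is roughly what the macro block above generates for
 * one flag: "PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty)
 * __CLEARPAGEFLAG(Dirty, dirty)" expands to the same accessors that the
 * old hand-written #defines (removed further down) used to provide:
 */
static inline int PageDirty(struct page *page)
	{ return test_bit(PG_dirty, &page->flags); }
static inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &page->flags); }
static inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &page->flags); }
static inline int TestSetPageDirty(struct page *page)
	{ return test_and_set_bit(PG_dirty, &page->flags); }
static inline int TestClearPageDirty(struct page *page)
	{ return test_and_clear_bit(PG_dirty, &page->flags); }
static inline void __ClearPageDirty(struct page *page)
	{ __clear_bit(PG_dirty, &page->flags); }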
page_zone() is not + * available at this point. */ -#define PageLocked(page) \ - test_bit(PG_locked, &(page)->flags) -#define SetPageLocked(page) \ - set_bit(PG_locked, &(page)->flags) -#define TestSetPageLocked(page) \ - test_and_set_bit(PG_locked, &(page)->flags) -#define ClearPageLocked(page) \ - clear_bit(PG_locked, &(page)->flags) -#define TestClearPageLocked(page) \ - test_and_clear_bit(PG_locked, &(page)->flags) - -#define PageError(page) test_bit(PG_error, &(page)->flags) -#define SetPageError(page) set_bit(PG_error, &(page)->flags) -#define ClearPageError(page) clear_bit(PG_error, &(page)->flags) - -#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags) -#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags) -#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags) -#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags) +#define PageHighMem(__p) is_highmem(page_zone(__p)) +#else +PAGEFLAG_FALSE(HighMem) +#endif + +#ifdef CONFIG_SWAP +PAGEFLAG(SwapCache, swapcache) +#else +PAGEFLAG_FALSE(SwapCache) +#endif + +#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR +PAGEFLAG(Uncached, uncached) +#else +PAGEFLAG_FALSE(Uncached) +#endif static inline int PageUptodate(struct page *page) { @@ -177,97 +241,59 @@ static inline void SetPageUptodate(struct page *page) #endif } -#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) - -#define PageDirty(page) test_bit(PG_dirty, &(page)->flags) -#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags) -#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags) -#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags) -#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags) -#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags) - -#define PageLRU(page) test_bit(PG_lru, &(page)->flags) -#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags) -#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags) -#define __ClearPageLRU(page) __clear_bit(PG_lru, &(page)->flags) - -#define PageActive(page) test_bit(PG_active, &(page)->flags) -#define SetPageActive(page) set_bit(PG_active, &(page)->flags) -#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags) -#define __ClearPageActive(page) __clear_bit(PG_active, &(page)->flags) - -#define PageSlab(page) test_bit(PG_slab, &(page)->flags) -#define __SetPageSlab(page) __set_bit(PG_slab, &(page)->flags) -#define __ClearPageSlab(page) __clear_bit(PG_slab, &(page)->flags) - -#ifdef CONFIG_HIGHMEM -#define PageHighMem(page) is_highmem(page_zone(page)) -#else -#define PageHighMem(page) 0 /* needed to optimize away at compile time */ -#endif +CLEARPAGEFLAG(Uptodate, uptodate) -#define PageChecked(page) test_bit(PG_checked, &(page)->flags) -#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) -#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags) - -#define PagePinned(page) test_bit(PG_pinned, &(page)->flags) -#define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags) -#define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags) +extern void cancel_dirty_page(struct page *page, unsigned int account_size); -#define PageReserved(page) test_bit(PG_reserved, &(page)->flags) -#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags) -#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags) -#define __ClearPageReserved(page) __clear_bit(PG_reserved, &(page)->flags) +int test_clear_page_writeback(struct page 
*page); +int test_set_page_writeback(struct page *page); -#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags) -#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags) -#define PagePrivate(page) test_bit(PG_private, &(page)->flags) -#define __SetPagePrivate(page) __set_bit(PG_private, &(page)->flags) -#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags) +static inline void set_page_writeback(struct page *page) +{ + test_set_page_writeback(page); +} +#ifdef CONFIG_PAGEFLAGS_EXTENDED /* - * Only test-and-set exist for PG_writeback. The unconditional operators are - * risky: they bypass page accounting. + * System with lots of page flags available. This allows separate + * flags for PageHead() and PageTail() checks of compound pages so that bit + * tests can be used in performance sensitive paths. PageCompound is + * generally not used in hot code paths. */ -#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags) -#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \ - &(page)->flags) -#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \ - &(page)->flags) +__PAGEFLAG(Head, head) +__PAGEFLAG(Tail, tail) -#define PageBuddy(page) test_bit(PG_buddy, &(page)->flags) -#define __SetPageBuddy(page) __set_bit(PG_buddy, &(page)->flags) -#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags) - -#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags) -#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags) -#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags) - -#define PageReadahead(page) test_bit(PG_readahead, &(page)->flags) -#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags) -#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags) - -#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags) -#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags) -#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags) -#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags) +static inline int PageCompound(struct page *page) +{ + return page->flags & ((1L << PG_head) | (1L << PG_tail)); -#define PageCompound(page) test_bit(PG_compound, &(page)->flags) -#define __SetPageCompound(page) __set_bit(PG_compound, &(page)->flags) -#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags) +} +#else +/* + * Reduce page flag use as much as possible by overlapping + * compound page flags with the flags used for page cache pages. Possible + * because PageCompound is always set for compound pages and not for + * pages on the LRU and/or pagecache. + */ +TESTPAGEFLAG(Compound, compound) +__PAGEFLAG(Head, compound) /* * PG_reclaim is used in combination with PG_compound to mark the - * head and tail of a compound page + * head and tail of a compound page. This saves one page flag + * but makes it impossible to use compound pages for the page cache. + * The PG_reclaim bit would have to be used for reclaim or readahead + * if compound pages enter the page cache. 
* * PG_compound & PG_reclaim => Tail page * PG_compound & ~PG_reclaim => Head page */ - #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) -#define PageTail(page) (((page)->flags & PG_head_tail_mask) \ - == PG_head_tail_mask) +static inline int PageTail(struct page *page) +{ + return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); +} static inline void __SetPageTail(struct page *page) { @@ -279,33 +305,6 @@ static inline void __ClearPageTail(struct page *page) page->flags &= ~PG_head_tail_mask; } -#define PageHead(page) (((page)->flags & PG_head_tail_mask) \ - == (1L << PG_compound)) -#define __SetPageHead(page) __SetPageCompound(page) -#define __ClearPageHead(page) __ClearPageCompound(page) - -#ifdef CONFIG_SWAP -#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags) -#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags) -#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags) -#else -#define PageSwapCache(page) 0 -#endif - -#define PageUncached(page) test_bit(PG_uncached, &(page)->flags) -#define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags) -#define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags) - -struct page; /* forward declaration */ - -extern void cancel_dirty_page(struct page *page, unsigned int account_size); - -int test_clear_page_writeback(struct page *page); -int test_set_page_writeback(struct page *page); - -static inline void set_page_writeback(struct page *page) -{ - test_set_page_writeback(page); -} - +#endif /* !PAGEFLAGS_EXTENDED */ +#endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1ac969724bb..d746a2abb32 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -4,7 +4,6 @@ #include <linux/preempt.h> #include <linux/slab.h> /* For kmalloc() */ #include <linux/smp.h> -#include <linux/string.h> /* For memset() */ #include <linux/cpumask.h> #include <asm/percpu.h> diff --git a/include/linux/personality.h b/include/linux/personality.h index 012cd558189..a84e9ff9b27 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -105,10 +105,6 @@ struct exec_domain { */ #define personality(pers) (pers & PER_MASK) -/* - * Personality of the currently running process. - */ -#define get_personality (current->personality) /* * Change personality of the currently running process. 
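/*
 * With the get_personality shortcut removed above, callers read
 * current->personality directly and use personality() when only the base
 * personality matters. Illustrative helper, not part of the header:
 */
static inline int is_base_linux_personality(unsigned int pers)
{
	/* personality() keeps only the low PER_MASK bits, dropping flag
	 * bits such as ADDR_LIMIT_3GB or STICKY_TIMEOUTS */
	return personality(pers) == PER_LINUX;
}
/* typical call site: is_base_linux_personality(current->personality) */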
diff --git a/include/linux/phantom.h b/include/linux/phantom.h index 96f4048a6cc..02268c54c25 100644 --- a/include/linux/phantom.h +++ b/include/linux/phantom.h @@ -27,14 +27,17 @@ struct phm_regs { #define PH_IOC_MAGIC 'p' #define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *) -#define PHN_SET_REG _IOW (PH_IOC_MAGIC, 1, struct phm_reg *) +#define PHN_SET_REG _IOW(PH_IOC_MAGIC, 1, struct phm_reg *) #define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *) -#define PHN_SET_REGS _IOW (PH_IOC_MAGIC, 3, struct phm_regs *) +#define PHN_SET_REGS _IOW(PH_IOC_MAGIC, 3, struct phm_regs *) /* this ioctl tells the driver, that the caller is not OpenHaptics and might * use improved registers update (no more phantom switchoffs when using * libphantom) */ -#define PHN_NOT_OH _IO (PH_IOC_MAGIC, 4) -#define PH_IOC_MAXNR 4 +#define PHN_NOT_OH _IO(PH_IOC_MAGIC, 4) +#define PHN_GETREG _IOWR(PH_IOC_MAGIC, 5, struct phm_reg) +#define PHN_SETREG _IOW(PH_IOC_MAGIC, 6, struct phm_reg) +#define PHN_GETREGS _IOWR(PH_IOC_MAGIC, 7, struct phm_regs) +#define PHN_SETREGS _IOW(PH_IOC_MAGIC, 8, struct phm_regs) #define PHN_CONTROL 0x6 /* control byte in iaddr space */ #define PHN_CTL_AMP 0x1 /* switch after torques change */ diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 5c80b193963..5ad79198d6f 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -16,7 +16,8 @@ # define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */ # define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */ -/* Get/set whether or not to drop capabilities on setuid() away from uid 0 */ +/* Get/set whether or not to drop capabilities on setuid() away from + * uid 0 (as per security/commoncap.c) */ #define PR_GET_KEEPCAPS 7 #define PR_SET_KEEPCAPS 8 @@ -63,7 +64,7 @@ #define PR_GET_SECCOMP 21 #define PR_SET_SECCOMP 22 -/* Get/set the capability bounding set */ +/* Get/set the capability bounding set (as per security/commoncap.c) */ #define PR_CAPBSET_READ 23 #define PR_CAPBSET_DROP 24 @@ -73,4 +74,8 @@ # define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */ # define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */ +/* Get/set securebits (as per security/commoncap.c) */ +#define PR_GET_SECUREBITS 27 +#define PR_SET_SECUREBITS 28 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 9b6c935f69c..9883bc94226 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -9,7 +9,6 @@ struct net; struct completion; - /* * The proc filesystem constants/structures */ @@ -41,7 +40,7 @@ enum { * /proc file has a parent, but "subdir" is NULL for all * non-directory entries). * - * "get_info" is called at "read", while "owner" is used to protect module + * "owner" is used to protect module * from unloading while proc_dir_entry is in use */ @@ -49,7 +48,6 @@ typedef int (read_proc_t)(char *page, char **start, off_t off, int count, int *eof, void *data); typedef int (write_proc_t)(struct file *file, const char __user *buffer, unsigned long count, void *data); -typedef int (get_info_t)(char *, char **, off_t, int); struct proc_dir_entry { unsigned int low_ino; @@ -70,7 +68,6 @@ struct proc_dir_entry { * somewhere. 
*/ const struct file_operations *proc_fops; - get_info_t *get_info; struct module *owner; struct proc_dir_entry *next, *parent, *subdir; void *data; @@ -97,10 +94,6 @@ struct vmcore { #ifdef CONFIG_PROC_FS -extern struct proc_dir_entry proc_root; -extern struct proc_dir_entry *proc_root_fs; -extern struct proc_dir_entry *proc_bus; -extern struct proc_dir_entry *proc_root_driver; extern struct proc_dir_entry *proc_root_kcore; extern spinlock_t proc_subdir_lock; @@ -123,9 +116,10 @@ void de_put(struct proc_dir_entry *de); extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent); -struct proc_dir_entry *proc_create(const char *name, mode_t mode, +struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, struct proc_dir_entry *parent, - const struct file_operations *proc_fops); + const struct file_operations *proc_fops, + void *data); extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); extern struct vfsmount *proc_mnt; @@ -180,6 +174,12 @@ extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *); extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, struct proc_dir_entry *parent); +static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, + struct proc_dir_entry *parent, const struct file_operations *proc_fops) +{ + return proc_create_data(name, mode, parent, proc_fops, NULL); +} + static inline struct proc_dir_entry *create_proc_read_entry(const char *name, mode_t mode, struct proc_dir_entry *base, read_proc_t *read_proc, void * data) @@ -192,24 +192,19 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name, return res; } -static inline struct proc_dir_entry *create_proc_info_entry(const char *name, - mode_t mode, struct proc_dir_entry *base, get_info_t *get_info) -{ - struct proc_dir_entry *res=create_proc_entry(name,mode,base); - if (res) res->get_info=get_info; - return res; -} - extern struct proc_dir_entry *proc_net_fops_create(struct net *net, const char *name, mode_t mode, const struct file_operations *fops); extern void proc_net_remove(struct net *net, const char *name); extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, struct proc_dir_entry *parent); -#else +/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are + * only needed to implement /proc/<pid>|self/exe so we define them here. 
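/*
 * Sketch of how a driver is expected to use the proc_create_data()
 * variant added above: the private pointer is handed over at creation
 * time instead of being poked into the proc_dir_entry afterwards, and is
 * reached again through PDE(inode)->data. The names "mydrv" and
 * mydrv_stats are made up for this example.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct mydrv_stats { unsigned long hits; };

static struct mydrv_stats mydrv_stats;

static int mydrv_proc_show(struct seq_file *m, void *v)
{
	struct mydrv_stats *stats = m->private;

	seq_printf(m, "hits: %lu\n", stats->hits);
	return 0;
}

static int mydrv_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer passed to proc_create_data() */
	return single_open(file, mydrv_proc_show, PDE(inode)->data);
}

static const struct file_operations mydrv_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mydrv_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init mydrv_init(void)
{
	if (!proc_create_data("mydrv", 0444, NULL, &mydrv_proc_fops,
			      &mydrv_stats))
		return -ENOMEM;
	return 0;
}

static void __exit mydrv_exit(void)
{
	remove_proc_entry("mydrv", NULL);
}

module_init(mydrv_init);
module_exit(mydrv_exit);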
*/ +extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern struct file *get_mm_exe_file(struct mm_struct *mm); +extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm); -#define proc_root_driver NULL -#define proc_bus NULL +#else #define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; }) static inline void proc_net_remove(struct net *net, const char *name) {} @@ -226,6 +221,12 @@ static inline struct proc_dir_entry *proc_create(const char *name, { return NULL; } +static inline struct proc_dir_entry *proc_create_data(const char *name, + mode_t mode, struct proc_dir_entry *parent, + const struct file_operations *proc_fops, void *data) +{ + return NULL; +} #define remove_proc_entry(name, parent) do {} while (0) static inline struct proc_dir_entry *proc_symlink(const char *name, @@ -236,16 +237,11 @@ static inline struct proc_dir_entry *proc_mkdir(const char *name, static inline struct proc_dir_entry *create_proc_read_entry(const char *name, mode_t mode, struct proc_dir_entry *base, read_proc_t *read_proc, void * data) { return NULL; } -static inline struct proc_dir_entry *create_proc_info_entry(const char *name, - mode_t mode, struct proc_dir_entry *base, get_info_t *get_info) - { return NULL; } struct tty_driver; static inline void proc_tty_register_driver(struct tty_driver *driver) {}; static inline void proc_tty_unregister_driver(struct tty_driver *driver) {}; -extern struct proc_dir_entry proc_root; - static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; @@ -255,6 +251,19 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns) { } +static inline void set_mm_exe_file(struct mm_struct *mm, + struct file *new_exe_file) +{} + +static inline struct file *get_mm_exe_file(struct mm_struct *mm) +{ + return NULL; +} + +static inline void dup_mm_exe_file(struct mm_struct *oldmm, + struct mm_struct *newmm) +{} + #endif /* CONFIG_PROC_FS */ #if !defined(CONFIG_PROC_KCORE) diff --git a/include/linux/quota.h b/include/linux/quota.h index eb560d031ac..52e49dce658 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -202,10 +202,14 @@ struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format; + int dqi_fmt_id; /* Id of the dqi_format - used when turning + * quotas on after remount RW */ struct list_head dqi_dirty_list; /* List of dirty dquots */ unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; + qsize_t dqi_maxblimit; + qsize_t dqi_maxilimit; union { struct v1_mem_dqinfo v1_i; struct v2_mem_dqinfo v2_i; @@ -296,8 +300,8 @@ struct dquot_operations { /* Operations handling requests from userspace */ struct quotactl_ops { - int (*quota_on)(struct super_block *, int, int, char *); - int (*quota_off)(struct super_block *, int); + int (*quota_on)(struct super_block *, int, int, char *, int); + int (*quota_off)(struct super_block *, int, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); @@ -318,6 +322,10 @@ struct quota_format_type { #define DQUOT_USR_ENABLED 0x01 /* User diskquotas enabled */ #define DQUOT_GRP_ENABLED 0x02 /* Group diskquotas enabled */ +#define DQUOT_USR_SUSPENDED 0x04 /* User diskquotas are off, but + * we have necessary info in + * memory to turn them on */ +#define DQUOT_GRP_SUSPENDED 0x08 /* The same for group quotas */ struct quota_info { unsigned int flags; /* Flags for diskquotas on this 
device */ @@ -329,17 +337,16 @@ struct quota_info { struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; -/* Inline would be better but we need to dereference super_block which is not defined yet */ -int mark_dquot_dirty(struct dquot *dquot); - -#define dquot_dirty(dquot) test_bit(DQ_MOD_B, &(dquot)->dq_flags) - #define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \ (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED)) #define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \ sb_has_quota_enabled(sb, GRPQUOTA)) +#define sb_has_quota_suspended(sb, type) \ + ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \ + (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED)) + int register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 5110201a415..f8670205385 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -37,11 +37,11 @@ extern int dquot_release(struct dquot *dquot); extern int dquot_commit_info(struct super_block *sb, int type); extern int dquot_mark_dquot_dirty(struct dquot *dquot); -extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path); +extern int vfs_quota_on(struct super_block *sb, int type, int format_id, + char *path, int remount); extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); -extern int vfs_quota_off(struct super_block *sb, int type); -#define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type) +extern int vfs_quota_off(struct super_block *sb, int type, int remount); extern int vfs_quota_sync(struct super_block *sb, int type); extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); @@ -59,7 +59,7 @@ extern struct quotactl_ops vfs_quotactl_ops; /* It is better to call this function outside of any transaction as it might * need a lot of space in journal for dquot structure allocation. */ -static __inline__ void DQUOT_INIT(struct inode *inode) +static inline void DQUOT_INIT(struct inode *inode) { BUG_ON(!inode->i_sb); if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) @@ -67,7 +67,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode) } /* The same as with DQUOT_INIT */ -static __inline__ void DQUOT_DROP(struct inode *inode) +static inline void DQUOT_DROP(struct inode *inode) { /* Here we can get arbitrary inode from clear_inode() so we have * to be careful. 
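/*
 * Rough sketch (not taken from a real filesystem) of how the remount
 * plumbing in this header is meant to be driven: DQUOT_OFF() now takes a
 * remount flag so quotas are only suspended when the super block goes
 * read-only, and the new DQUOT_ON_REMOUNT() defined below re-enables the
 * suspended quotas when it goes read-write again.
 */
static int example_remount_fs(struct super_block *sb, int *flags, char *data)
{
	int err = 0;

	if (*flags & MS_RDONLY)
		err = DQUOT_OFF(sb, 1);		/* suspend, keep state in memory */
	else if (sb->s_flags & MS_RDONLY)
		err = DQUOT_ON_REMOUNT(sb);	/* resume suspended quotas */

	return err;
}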
OTOH we don't need locking as quota operations @@ -90,7 +90,7 @@ static __inline__ void DQUOT_DROP(struct inode *inode) /* The following allocation/freeing/transfer functions *must* be called inside * a transaction (deadlocks possible otherwise) */ -static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -102,7 +102,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t return 0; } -static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; if (!(ret = DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr))) @@ -110,7 +110,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr) return ret; } -static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) { /* Used space is updated in alloc_space() */ @@ -122,7 +122,7 @@ static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) return 0; } -static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) +static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) { int ret; if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr))) @@ -130,7 +130,7 @@ static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr) return ret; } -static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode) +static inline int DQUOT_ALLOC_INODE(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) { DQUOT_INIT(inode); @@ -140,7 +140,7 @@ static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode) return 0; } -static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) +static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_space(inode, nr); @@ -148,19 +148,19 @@ static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr) inode_sub_bytes(inode, nr); } -static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) +static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) { DQUOT_FREE_SPACE_NODIRTY(inode, nr); mark_inode_dirty(inode); } -static __inline__ void DQUOT_FREE_INODE(struct inode *inode) +static inline void DQUOT_FREE_INODE(struct inode *inode) { if (sb_any_quota_enabled(inode->i_sb)) inode->i_sb->dq_op->free_inode(inode, 1); } -static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) +static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) { if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) { DQUOT_INIT(inode); @@ -171,14 +171,32 @@ static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) } /* The following two functions cannot be called inside a transaction */ -#define DQUOT_SYNC(sb) sync_dquots(sb, -1) +static inline void DQUOT_SYNC(struct super_block *sb) +{ + sync_dquots(sb, -1); +} -static __inline__ int DQUOT_OFF(struct super_block *sb) +static inline int DQUOT_OFF(struct super_block *sb, int remount) { int ret = -ENOSYS; - if (sb_any_quota_enabled(sb) && sb->s_qcop && sb->s_qcop->quota_off) - ret = sb->s_qcop->quota_off(sb, -1); + if (sb->s_qcop && sb->s_qcop->quota_off) + ret = sb->s_qcop->quota_off(sb, 
-1, remount); + return ret; +} + +static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +{ + int cnt; + int ret = 0, err; + + if (!sb->s_qcop || !sb->s_qcop->quota_on) + return -ENOSYS; + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); + if (err < 0 && !ret) + ret = err; + } return ret; } @@ -189,13 +207,43 @@ static __inline__ int DQUOT_OFF(struct super_block *sb) */ #define sb_dquot_ops (NULL) #define sb_quotactl_ops (NULL) -#define DQUOT_INIT(inode) do { } while(0) -#define DQUOT_DROP(inode) do { } while(0) -#define DQUOT_ALLOC_INODE(inode) (0) -#define DQUOT_FREE_INODE(inode) do { } while(0) -#define DQUOT_SYNC(sb) do { } while(0) -#define DQUOT_OFF(sb) do { } while(0) -#define DQUOT_TRANSFER(inode, iattr) (0) + +static inline void DQUOT_INIT(struct inode *inode) +{ +} + +static inline void DQUOT_DROP(struct inode *inode) +{ +} + +static inline int DQUOT_ALLOC_INODE(struct inode *inode) +{ + return 0; +} + +static inline void DQUOT_FREE_INODE(struct inode *inode) +{ +} + +static inline void DQUOT_SYNC(struct super_block *sb) +{ +} + +static inline int DQUOT_OFF(struct super_block *sb, int remount) +{ + return 0; +} + +static inline int DQUOT_ON_REMOUNT(struct super_block *sb) +{ + return 0; +} + +static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr) +{ + return 0; +} + static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr) { inode_add_bytes(inode, nr); @@ -235,11 +283,38 @@ static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr) #endif /* CONFIG_QUOTA */ -#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) -#define DQUOT_PREALLOC_BLOCK(inode, nr) DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) -#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) -#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) -#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) -#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits) +static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +{ + return DQUOT_PREALLOC_SPACE_NODIRTY(inode, + nr << inode->i_sb->s_blocksize_bits); +} + +static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr) +{ + return DQUOT_PREALLOC_SPACE(inode, + nr << inode->i_sb->s_blocksize_bits); +} + +static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +{ + return DQUOT_ALLOC_SPACE_NODIRTY(inode, + nr << inode->i_sb->s_blocksize_bits); +} + +static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr) +{ + return DQUOT_ALLOC_SPACE(inode, + nr << inode->i_sb->s_blocksize_bits); +} + +static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr) +{ + DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits); +} + +static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr) +{ + DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits); +} #endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 93678f57ccb..f0827d31ae6 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h @@ -252,6 +252,8 @@ struct r6_state { #define STRIPE_EXPANDING 9 
#define STRIPE_EXPAND_SOURCE 10 #define STRIPE_EXPAND_READY 11 +#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ +#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ /* * Operations flags (in issue order) */ @@ -316,12 +318,17 @@ struct raid5_private_data { int previous_raid_disks; struct list_head handle_list; /* stripes needing handling */ + struct list_head hold_list; /* preread ready stripes */ struct list_head delayed_list; /* stripes that have plugged requests */ struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ struct bio *retry_read_aligned; /* currently retrying aligned bios */ struct bio *retry_read_aligned_list; /* aligned bios retry list */ atomic_t preread_active_stripes; /* stripes with scheduled io */ atomic_t active_aligned_reads; + atomic_t pending_full_writes; /* full write backlog */ + int bypass_count; /* bypassed prereads */ + int bypass_threshold; /* preread nice */ + struct list_head *last_hold; /* detect hold_list promotions */ atomic_t reshape_stripes; /* stripes with pending writes for reshape */ /* unfortunately we need two cache names as we temporarily have diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 8e7eff2cd0a..4aacaeecb56 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -2176,6 +2176,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); long reiserfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int reiserfs_unpack(struct inode *inode, struct file *filp); /* ioctl's command */ #define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 61363ce896d..6d9e1fca098 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -9,6 +9,8 @@ * * Author: Pavel Emelianov <xemul@openvz.org> * + * See Documentation/controllers/resource_counter.txt for more + * info about what this counter is. */ #include <linux/cgroup.h> @@ -25,6 +27,10 @@ struct res_counter { */ unsigned long long usage; /* + * the maximal value of the usage from the counter creation + */ + unsigned long long max_usage; + /* * the limit that usage cannot exceed */ unsigned long long limit; @@ -39,8 +45,9 @@ struct res_counter { spinlock_t lock; }; -/* +/** * Helpers to interact with userspace + * res_counter_read_u64() - returns the value of the specified member. * res_counter_read/_write - put/get the specified fields from the * res_counter struct to/from the user * @@ -51,6 +58,8 @@ struct res_counter { * @pos: and the offset. 
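/*
 * Userspace model (not the kernel code) of the bookkeeping the new
 * max_usage field implies: a successful charge raises usage and the
 * high-water mark, a failed one only bumps failcnt. RES_MAX_USAGE
 * reports the high-water mark and res_counter_reset_max() rewinds it to
 * the current usage.
 */
#include <stdio.h>

struct counter_model {
	unsigned long long usage, max_usage, limit, failcnt;
};

static int charge(struct counter_model *c, unsigned long long val)
{
	if (c->usage + val > c->limit) {
		c->failcnt++;
		return -1;
	}
	c->usage += val;
	if (c->usage > c->max_usage)
		c->max_usage = c->usage;
	return 0;
}

int main(void)
{
	struct counter_model c = { .limit = 100 };

	charge(&c, 80);		/* usage 80, max_usage 80 */
	charge(&c, 40);		/* over limit: failcnt becomes 1 */
	c.usage -= 50;		/* uncharge: usage 30, max_usage stays 80 */
	printf("usage=%llu max=%llu failcnt=%llu\n",
	       c.usage, c.max_usage, c.failcnt);
	return 0;
}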
*/ +u64 res_counter_read_u64(struct res_counter *counter, int member); + ssize_t res_counter_read(struct res_counter *counter, int member, const char __user *buf, size_t nbytes, loff_t *pos, int (*read_strategy)(unsigned long long val, char *s)); @@ -64,6 +73,7 @@ ssize_t res_counter_write(struct res_counter *counter, int member, enum { RES_USAGE, + RES_MAX_USAGE, RES_LIMIT, RES_FAILCNT, }; @@ -124,4 +134,21 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt) return ret; } +static inline void res_counter_reset_max(struct res_counter *cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->max_usage = cnt->usage; + spin_unlock_irqrestore(&cnt->lock, flags); +} + +static inline void res_counter_reset_failcnt(struct res_counter *cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&cnt->lock, flags); + cnt->failcnt = 0; + spin_unlock_irqrestore(&cnt->lock, flags); +} #endif diff --git a/include/linux/resource.h b/include/linux/resource.h index ae13db71474..aaa423a6f3d 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -19,6 +19,7 @@ struct task_struct; #define RUSAGE_SELF 0 #define RUSAGE_CHILDREN (-1) #define RUSAGE_BOTH (-2) /* sys_wait4() uses this */ +#define RUSAGE_THREAD 1 /* only the calling thread */ struct rusage { struct timeval ru_utime; /* user time used */ diff --git a/include/linux/rio.h b/include/linux/rio.h index 68e3f6853fa..cfb66bbc0f2 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -23,7 +23,6 @@ #include <linux/device.h> #include <linux/rio_regs.h> -#define RIO_ANY_DESTID 0xff #define RIO_NO_HOPCOUNT -1 #define RIO_INVALID_DESTID 0xffff @@ -39,11 +38,8 @@ entry is invalid (no route exists for the device ID) */ -#ifdef CONFIG_RAPIDIO_8_BIT_TRANSPORT -#define RIO_MAX_ROUTE_ENTRIES (1 << 8) -#else -#define RIO_MAX_ROUTE_ENTRIES (1 << 16) -#endif +#define RIO_MAX_ROUTE_ENTRIES(size) (size ? (1 << 16) : (1 << 8)) +#define RIO_ANY_DESTID(size) (size ? 0xffff : 0xff) #define RIO_MAX_MBOX 4 #define RIO_MAX_MSG_SIZE 0x1000 @@ -149,6 +145,11 @@ struct rio_dbell { void *dev_id; }; +enum rio_phy_type { + RIO_PHY_PARALLEL, + RIO_PHY_SERIAL, +}; + /** * struct rio_mport - RIO master port info * @dbells: List of doorbell events @@ -163,6 +164,7 @@ struct rio_dbell { * @id: Port ID, unique among all ports * @index: Port index, unique among all port interfaces of the same type * @name: Port name string + * @priv: Master port private data */ struct rio_mport { struct list_head dbells; /* list of doorbell events */ @@ -177,7 +179,13 @@ struct rio_mport { unsigned char id; /* port ID, unique among all ports */ unsigned char index; /* port index, unique among all port interfaces of the same type */ + unsigned int sys_size; /* RapidIO common transport system size. + * 0 - Small size. 256 devices. + * 1 - Large size, 65536 devices. + */ + enum rio_phy_type phy_type; /* RapidIO phy type */ unsigned char name[40]; + void *priv; /* Master port private data */ }; /** @@ -211,7 +219,7 @@ struct rio_switch { u16 switchid; u16 hopcount; u16 destid; - u8 route_table[RIO_MAX_ROUTE_ENTRIES]; + u8 *route_table; int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port); int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, @@ -229,13 +237,15 @@ struct rio_switch { * @dsend: Callback to send a doorbell message. 
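/*
 * Hypothetical driver fragment showing why struct rio_mport is now
 * threaded through every rio_ops callback below and why the new ->priv
 * field exists: each callback can recover its own per-port state instead
 * of relying on a single global controller. "my_mport_priv" and the
 * register access are made up; index and len handling is omitted.
 */
struct my_mport_priv {
	void __iomem *regs;	/* stored in mport->priv at init time */
};

static int my_lcread(struct rio_mport *mport, int index, u32 offset,
		     int len, u32 *data)
{
	struct my_mport_priv *priv = mport->priv;

	*data = __raw_readl(priv->regs + offset);
	return 0;
}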
*/ struct rio_ops { - int (*lcread) (int index, u32 offset, int len, u32 * data); - int (*lcwrite) (int index, u32 offset, int len, u32 data); - int (*cread) (int index, u16 destid, u8 hopcount, u32 offset, int len, - u32 * data); - int (*cwrite) (int index, u16 destid, u8 hopcount, u32 offset, int len, - u32 data); - int (*dsend) (int index, u16 destid, u16 data); + int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, + u32 *data); + int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len, + u32 data); + int (*cread) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 *data); + int (*cwrite) (struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 data); + int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); }; #define RIO_RESOURCE_MEM 0x00000100 diff --git a/include/linux/sched.h b/include/linux/sched.h index d0bd97044ab..1d02babdb2c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -68,7 +68,6 @@ struct sched_param { #include <linux/smp.h> #include <linux/sem.h> #include <linux/signal.h> -#include <linux/securebits.h> #include <linux/fs_struct.h> #include <linux/compiler.h> #include <linux/completion.h> @@ -1133,7 +1132,7 @@ struct task_struct { gid_t gid,egid,sgid,fsgid; struct group_info *group_info; kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; - unsigned keep_capabilities:1; + unsigned securebits; struct user_struct *user; #ifdef CONFIG_KEYS struct key *request_key_auth; /* assumed request_key authority */ @@ -1798,6 +1797,8 @@ extern void mmput(struct mm_struct *); extern struct mm_struct *get_task_mm(struct task_struct *task); /* Remove the current tasks stale references to the old mm_struct */ extern void mm_release(struct task_struct *, struct mm_struct *); +/* Allocate a new mm structure and copy contents from tsk->mm */ +extern struct mm_struct *dup_mm(struct task_struct *tsk); extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); extern void flush_thread(void); @@ -2147,6 +2148,19 @@ static inline void migration_init(void) #define TASK_SIZE_OF(tsk) TASK_SIZE #endif +#ifdef CONFIG_MM_OWNER +extern void mm_update_next_owner(struct mm_struct *mm); +extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); +#else +static inline void mm_update_next_owner(struct mm_struct *mm) +{ +} + +static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) +{ +} +#endif /* CONFIG_MM_OWNER */ + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/securebits.h b/include/linux/securebits.h index 5b0617840fa..c1f19dbceb0 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h @@ -3,28 +3,39 @@ #define SECUREBITS_DEFAULT 0x00000000 -extern unsigned securebits; - /* When set UID 0 has no special privileges. When unset, we support inheritance of root-permissions and suid-root executable under compatibility mode. We raise the effective and inheritable bitmasks *of the executable file* if the effective uid of the new process is 0. If the real uid is 0, we raise the inheritable bitmask of the executable file. */ -#define SECURE_NOROOT 0 +#define SECURE_NOROOT 0 +#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ /* When set, setuid to/from uid 0 does not trigger capability-"fixes" to be compatible with old programs relying on set*uid to loose privileges. When unset, setuid doesn't change privileges. 
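/*
 * Quick illustration of the paired layout used here: each securebit
 * setting occupies an even bit and its "locked" companion the next odd
 * bit, which is why SECURE_ALL_LOCKS is just SECURE_ALL_BITS shifted
 * left by one:
 *
 *   issecure_mask(SECURE_NOROOT)          = 1 << 0 = 0x01   (lock bit 0x02)
 *   issecure_mask(SECURE_NO_SETUID_FIXUP) = 1 << 2 = 0x04   (lock bit 0x08)
 *   issecure_mask(SECURE_KEEP_CAPS)       = 1 << 4 = 0x10   (lock bit 0x20)
 *
 * Illustrative helper (not part of the header) built on the
 * issecure_mask() macro added just below:
 */
static inline int securebit_is_locked(unsigned securebits, int bit)
{
	return !!(securebits & issecure_mask(bit + 1));	/* bit + 1 is the lock */
}
/* e.g. securebit_is_locked(current->securebits, SECURE_KEEP_CAPS)
 * tests SECURE_KEEP_CAPS_LOCKED */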
*/ -#define SECURE_NO_SETUID_FIXUP 2 +#define SECURE_NO_SETUID_FIXUP 2 +#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ + +/* When set, a process can retain its capabilities even after + transitioning to a non-root user (the set-uid fixup suppressed by + bit 2). Bit-4 is cleared when a process calls exec(); setting both + bit 4 and 5 will create a barrier through exec that no exec()'d + child can use this feature again. */ +#define SECURE_KEEP_CAPS 4 +#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ /* Each securesetting is implemented using two bits. One bit specify whether the setting is on or off. The other bit specify whether the setting is fixed or not. A setting which is fixed cannot be changed from user-level. */ +#define issecure_mask(X) (1 << (X)) +#define issecure(X) (issecure_mask(X) & current->securebits) -#define issecure(X) ( (1 << (X+1)) & SECUREBITS_DEFAULT ? \ - (1 << (X)) & SECUREBITS_DEFAULT : \ - (1 << (X)) & securebits ) +#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ + issecure_mask(SECURE_NO_SETUID_FIXUP) | \ + issecure_mask(SECURE_KEEP_CAPS)) +#define SECURE_ALL_LOCKS (SECURE_ALL_BITS << 1) #endif /* !_LINUX_SECUREBITS_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 53a34539382..adb09d893ae 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -34,8 +34,6 @@ #include <linux/xfrm.h> #include <net/flow.h> -extern unsigned securebits; - /* Maximum number of letters for an LSM name string */ #define SECURITY_NAME_MAX 10 @@ -46,25 +44,28 @@ struct audit_krule; * These functions are in security/capability.c and are used * as the default capabilities functions */ -extern int cap_capable (struct task_struct *tsk, int cap); -extern int cap_settime (struct timespec *ts, struct timezone *tz); -extern int cap_ptrace (struct task_struct *parent, struct task_struct *child); -extern int cap_capget (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_capset_check (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern void cap_capset_set (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_bprm_set_security (struct linux_binprm *bprm); -extern void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe); +extern int cap_capable(struct task_struct *tsk, int cap); +extern int cap_settime(struct timespec *ts, struct timezone *tz); +extern int cap_ptrace(struct task_struct *parent, struct task_struct *child); +extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern int cap_bprm_set_security(struct linux_binprm *bprm); +extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); extern int cap_bprm_secureexec(struct linux_binprm *bprm); -extern int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags); -extern int cap_inode_removexattr(struct dentry *dentry, char *name); +extern int cap_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +extern int 
cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); -extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); -extern void cap_task_reparent_to_init (struct task_struct *p); -extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp); -extern int cap_task_setioprio (struct task_struct *p, int ioprio); -extern int cap_task_setnice (struct task_struct *p, int nice); -extern int cap_syslog (int type); +extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); +extern void cap_task_reparent_to_init(struct task_struct *p); +extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5, long *rc_p); +extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); +extern int cap_task_setioprio(struct task_struct *p, int ioprio); +extern int cap_task_setnice(struct task_struct *p, int nice); +extern int cap_syslog(int type); extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); struct msghdr; @@ -128,7 +129,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) { int i; if (opts->mnt_opts) - for(i = 0; i < opts->num_mnt_opts; i++) + for (i = 0; i < opts->num_mnt_opts; i++) kfree(opts->mnt_opts[i]); kfree(opts->mnt_opts); opts->mnt_opts = NULL; @@ -190,21 +191,21 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: - * This hook mediates the point when a search for a binary handler will - * begin. It allows a check the @bprm->security value which is set in - * the preceding set_security call. The primary difference from - * set_security is that the argv list and envp list are reliably - * available in @bprm. This hook may be called multiple times - * during a single execve; and in each pass set_security is called - * first. - * @bprm contains the linux_binprm structure. + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check the @bprm->security value which is set in + * the preceding set_security call. The primary difference from + * set_security is that the argv list and envp list are reliably + * available in @bprm. This hook may be called multiple times + * during a single execve; and in each pass set_security is called + * first. + * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_secureexec: - * Return a boolean value (0 or 1) indicating whether a "secure exec" - * is required. The flag is passed in the auxiliary table - * on the initial stack to the ELF interpreter to indicate whether libc - * should enable secure mode. - * @bprm contains the linux_binprm structure. + * Return a boolean value (0 or 1) indicating whether a "secure exec" + * is required. The flag is passed in the auxiliary table + * on the initial stack to the ELF interpreter to indicate whether libc + * should enable secure mode. + * @bprm contains the linux_binprm structure. * * Security hooks for filesystem operations. * @@ -221,7 +222,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. 
* @dentry is a handle on the superblock for the filesystem. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @sb_mount: * Check permission before an object specified by @dev_name is mounted on * the mount point named by @nd. For an ordinary mount, @dev_name @@ -282,12 +283,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @sb_pivotroot: * Check permission before pivoting the root filesystem. * @old_path contains the path for the new location of the current root (put_old). - * @new_path contains the path for the new root (new_root). + * @new_path contains the path for the new root (new_root). * Return 0 if permission is granted. * @sb_post_pivotroot: * Update module state after a successful pivot. * @old_path contains the path for the old root. - * @new_path contains the path for the new root. + * @new_path contains the path for the new root. * @sb_get_mnt_opts: * Get the security relevant mount options used for a superblock * @sb the superblock to get security mount options from @@ -316,9 +317,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inode_free_security: * @inode contains the inode structure. * Deallocate the inode security structure and set @inode->i_security to - * NULL. + * NULL. * @inode_init_security: - * Obtain the security attribute name suffix and value to set on a newly + * Obtain the security attribute name suffix and value to set on a newly * created inode and set up the incore security field for the new inode. * This hook is called by the fs code as part of the inode creation * transaction and provides for atomic labeling of the inode, unlike @@ -349,7 +350,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. * @inode_unlink: - * Check the permission to remove a hard link to a file. + * Check the permission to remove a hard link to a file. * @dir contains the inode structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. @@ -361,7 +362,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory - * associated with inode strcture @dir. + * associated with inode strcture @dir. * @dir containst the inode structure of parent of the directory to be created. * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. @@ -406,7 +407,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * called when the actual read/write operations are performed. * @inode contains the inode structure to check. * @mask contains the permission mask. - * @nd contains the nameidata (may be NULL). + * @nd contains the nameidata (may be NULL). * Return 0 if permission is granted. * @inode_setattr: * Check permission before setting file attributes. Note that the kernel @@ -428,24 +429,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * can use this hook to release any persistent label associated with the * inode. * @inode_setxattr: - * Check permission before setting the extended attributes - * @value identified by @name for @dentry. - * Return 0 if permission is granted. 
+ * Check permission before setting the extended attributes + * @value identified by @name for @dentry. + * Return 0 if permission is granted. * @inode_post_setxattr: - * Update inode security field after successful setxattr operation. - * @value identified by @name for @dentry. + * Update inode security field after successful setxattr operation. + * @value identified by @name for @dentry. * @inode_getxattr: - * Check permission before obtaining the extended attributes - * identified by @name for @dentry. - * Return 0 if permission is granted. + * Check permission before obtaining the extended attributes + * identified by @name for @dentry. + * Return 0 if permission is granted. * @inode_listxattr: - * Check permission before obtaining the list of extended attribute - * names for @dentry. - * Return 0 if permission is granted. + * Check permission before obtaining the list of extended attribute + * names for @dentry. + * Return 0 if permission is granted. * @inode_removexattr: - * Check permission before removing the extended attribute - * identified by @name for @dentry. - * Return 0 if permission is granted. + * Check permission before removing the extended attribute + * identified by @name for @dentry. + * Return 0 if permission is granted. * @inode_getsecurity: * Retrieve a copy of the extended attribute representation of the * security label associated with @name for @inode via @buffer. Note that @@ -457,7 +458,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Set the security label associated with @name for @inode from the * extended attribute value @value. @size indicates the size of the * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. - * Note that @name is the remainder of the attribute name after the + * Note that @name is the remainder of the attribute name after the * security. prefix has been removed. * Return 0 on success. * @inode_listsecurity: @@ -564,7 +565,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * struct file, so the file structure (and associated security information) * can always be obtained: * container_of(fown, struct file, f_owner) - * @tsk contains the structure of task receiving signal. + * @tsk contains the structure of task receiving signal. * @fown contains the file owner information. * @sig is the signal that will be sent. When 0, kernel sends SIGIO. * Return 0 if permission is granted. @@ -720,14 +721,16 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @arg3 contains a argument. * @arg4 contains a argument. * @arg5 contains a argument. - * Return 0 if permission is granted. + * @rc_p contains a pointer to communicate back the forced return code + * Return 0 if permission is granted, and non-zero if the security module + * has taken responsibility (setting *rc_p) for the prctl call. * @task_reparent_to_init: - * Set the security attributes in @p->security for a kernel thread that - * is being reparented to the init task. + * Set the security attributes in @p->security for a kernel thread that + * is being reparented to the init task. * @p contains the task_struct for the kernel thread. * @task_to_inode: - * Set the security attributes for an inode based on an associated task's - * security attributes, e.g. for /proc/pid inodes. + * Set the security attributes for an inode based on an associated task's + * security attributes, e.g. for /proc/pid inodes. * @p contains the task_struct for the task. 
* @inode contains the inode structure for the inode. * @@ -737,7 +740,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Save security information for a netlink message so that permission * checking can be performed when the message is processed. The security * information can be saved using the eff_cap field of the - * netlink_skb_parms structure. Also may be used to provide fine + * netlink_skb_parms structure. Also may be used to provide fine * grained control over message transmission. * @sk associated sock of task sending the message., * @skb contains the sk_buff structure for the netlink message. @@ -805,14 +808,14 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @sock contains the socket structure. * @address contains the address to bind to. * @addrlen contains the length of address. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @socket_connect: * Check permission before socket protocol layer connect operation * attempts to connect socket @sock to a remote address, @address. * @sock contains the socket structure. * @address contains the address of remote endpoint. * @addrlen contains the length of address. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @socket_listen: * Check permission before socket protocol layer listen operation. * @sock contains the socket structure. @@ -842,7 +845,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @msg contains the message structure. * @size contains the size of message structure. * @flags contains the operational flags. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @socket_getsockname: * Check permission before the local address (name) of the socket object * @sock is retrieved. @@ -866,7 +869,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @sock contains the socket structure. * @level contains the protocol level to set options for. * @optname contains the name of the option to set. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @socket_shutdown: * Checks permission before all or part of a connection on the socket * @sock is shut down. @@ -893,19 +896,19 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if all is well, otherwise, typical getsockopt return * values. * @socket_getpeersec_dgram: - * This hook allows the security module to provide peer socket security - * state for udp sockets on a per-packet basis to userspace via - * getsockopt SO_GETPEERSEC. The application must first have indicated - * the IP_PASSSEC option via getsockopt. It can then retrieve the - * security state returned by this hook for a packet via the SCM_SECURITY - * ancillary message type. - * @skb is the skbuff for the packet being queried - * @secdata is a pointer to a buffer in which to copy the security data - * @seclen is the maximum length for @secdata - * Return 0 on success, error on failure. + * This hook allows the security module to provide peer socket security + * state for udp sockets on a per-packet basis to userspace via + * getsockopt SO_GETPEERSEC. The application must first have indicated + * the IP_PASSSEC option via getsockopt. It can then retrieve the + * security state returned by this hook for a packet via the SCM_SECURITY + * ancillary message type. 
+ * @skb is the skbuff for the packet being queried + * @secdata is a pointer to a buffer in which to copy the security data + * @seclen is the maximum length for @secdata + * Return 0 on success, error on failure. * @sk_alloc_security: - * Allocate and attach a security structure to the sk->sk_security field, - * which is used to copy security attributes between local stream sockets. + * Allocate and attach a security structure to the sk->sk_security field, + * which is used to copy security attributes between local stream sockets. * @sk_free_security: * Deallocate security structure. * @sk_clone_security: @@ -920,7 +923,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inet_csk_clone: * Sets the new child socket's sid to the openreq sid. * @inet_conn_established: - * Sets the connection's peersid to the secmark on skb. + * Sets the connection's peersid to the secmark on skb. * @req_classify_flow: * Sets the flow's sid to the openreq sid. * @@ -999,13 +1002,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * No return value. * @key_permission: * See whether a specific operational right is granted to a process on a - * key. + * key. * @key_ref refers to the key (key pointer + possession attribute bit). * @context points to the process to provide the context against which to - * evaluate the security data on the key. + * evaluate the security data on the key. * @perm describes the combination of permissions required of this key. * Return 1 if permission granted, 0 if permission denied and -ve it the - * normal permissions model should be effected. + * normal permissions model should be effected. + * @key_getsecurity: + * Get a textual representation of the security context attached to a key + * for the purposes of honouring KEYCTL_GETSECURITY. This function + * allocates the storage for the NUL-terminated string and the caller + * should free it. + * @key points to the key to be queried. + * @_buffer points to a pointer that should be set to point to the + * resulting string (if no label or an error occurs). + * Return the length of the string (including terminating NUL) or -ve if + * an error. + * May also return 0 (and a NULL buffer pointer) if there is no label. * * Security hooks affecting all System V IPC operations. * @@ -1056,7 +1070,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO. * @msq contains the message queue to act upon. May be NULL. * @cmd contains the operation to be performed. - * Return 0 if permission is granted. + * Return 0 if permission is granted. * @msg_queue_msgsnd: * Check permission before a message, @msg, is enqueued on the message * queue, @msq. @@ -1066,8 +1080,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if permission is granted. * @msg_queue_msgrcv: * Check permission before a message, @msg, is removed from the message - * queue, @msq. The @target task structure contains a pointer to the - * process that will be receiving the message (not equal to the current + * queue, @msq. The @target task structure contains a pointer to the + * process that will be receiving the message (not equal to the current * process when inline receives are being performed). * @msq contains the message queue to retrieve message from. * @msg contains the message destination. 
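As an aside on the new @key_getsecurity hook documented above: its contract is allocate-in-callee, free-in-caller, with the return value being the string length including the terminating NUL, or 0 with a NULL buffer when the key carries no label. A minimal conforming implementation might look like the sketch below. This is illustrative only and not part of the patch; the function name and the assumption that the module keeps its label as a plain C string in key->security are hypothetical, and the usual kernel headers (<linux/key.h>, <linux/string.h>, <linux/slab.h>) are assumed.

static int example_key_getsecurity(struct key *key, char **_buffer)
{
	const char *label = key->security;	/* hypothetical: label stored as a C string */
	char *copy;

	if (!label) {
		*_buffer = NULL;		/* no label attached to this key */
		return 0;
	}

	copy = kstrdup(label, GFP_KERNEL);	/* caller releases the copy with kfree() */
	if (!copy)
		return -ENOMEM;

	*_buffer = copy;
	return strlen(copy) + 1;		/* length including the terminating NUL */
}

Returning a private copy lets the KEYCTL_GETSECURITY path kfree() the buffer without knowing how the module stores its labels internally.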
@@ -1132,15 +1146,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if permission is granted. * @sem_semctl: * Check permission when a semaphore operation specified by @cmd is to be - * performed on the semaphore @sma. The @sma may be NULL, e.g. for + * performed on the semaphore @sma. The @sma may be NULL, e.g. for * IPC_INFO or SEM_INFO. * @sma contains the semaphore structure. May be NULL. * @cmd contains the operation to be performed. * Return 0 if permission is granted. * @sem_semop * Check permissions before performing operations on members of the - * semaphore set @sma. If the @alter flag is nonzero, the semaphore set - * may be modified. + * semaphore set @sma. If the @alter flag is nonzero, the semaphore set + * may be modified. * @sma contains the semaphore structure. * @sops contains the operations to perform. * @nsops contains the number of operations to perform. @@ -1211,7 +1225,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @syslog: * Check permission before accessing the kernel message ring or changing * logging to the console. - * See the syslog(2) manual page for an explanation of the @type values. + * See the syslog(2) manual page for an explanation of the @type values. * @type contains the type of action. * Return 0 if permission is granted. * @settime: @@ -1223,22 +1237,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @vm_enough_memory: * Check permissions for allocating a new virtual mapping. * @mm contains the mm struct it is being added to. - * @pages contains the number of pages. + * @pages contains the number of pages. * Return 0 if permission is granted. * * @register_security: - * allow module stacking. - * @name contains the name of the security module being stacked. - * @ops contains a pointer to the struct security_operations of the module to stack. - * + * allow module stacking. + * @name contains the name of the security module being stacked. + * @ops contains a pointer to the struct security_operations of the module to stack. + * * @secid_to_secctx: * Convert secid to security context. * @secid contains the security ID. * @secdata contains the pointer that stores the converted security context. * @secctx_to_secid: - * Convert security context to secid. - * @secid contains the pointer to the generated security ID. - * @secdata contains the security context. + * Convert security context to secid. + * @secid contains the pointer to the generated security ID. + * @secdata contains the security context. * * @release_secctx: * Release the security context. 
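Similarly, the @secid_to_secctx and @release_secctx hooks documented above are meant to be used as a symmetric pair: the LSM owns the buffer it hands out, so a caller returns it through security_release_secctx() rather than freeing it directly. The caller-side sketch below is a hedged illustration, not part of the patch; the function name and the printk reporting are invented, and <linux/security.h> plus <linux/kernel.h> are assumed.

static void example_report_secctx(u32 secid)
{
	char *secdata;
	u32 seclen;

	/* Ask the LSM for a textual context; a non-zero return means
	 * no context is available or the conversion failed. */
	if (security_secid_to_secctx(secid, &secdata, &seclen))
		return;

	printk(KERN_INFO "security context: %.*s\n", (int)seclen, secdata);

	/* Hand the buffer back to the LSM that allocated it. */
	security_release_secctx(secdata, seclen);
}

Pairing the two calls this way keeps the allocation strategy private to the security module, which is the point of having a separate release hook.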
@@ -1281,49 +1295,49 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) struct security_operations { char name[SECURITY_NAME_MAX + 1]; - int (*ptrace) (struct task_struct * parent, struct task_struct * child); - int (*capget) (struct task_struct * target, - kernel_cap_t * effective, - kernel_cap_t * inheritable, kernel_cap_t * permitted); - int (*capset_check) (struct task_struct * target, - kernel_cap_t * effective, - kernel_cap_t * inheritable, - kernel_cap_t * permitted); - void (*capset_set) (struct task_struct * target, - kernel_cap_t * effective, - kernel_cap_t * inheritable, - kernel_cap_t * permitted); - int (*capable) (struct task_struct * tsk, int cap); - int (*acct) (struct file * file); - int (*sysctl) (struct ctl_table * table, int op); - int (*quotactl) (int cmds, int type, int id, struct super_block * sb); - int (*quota_on) (struct dentry * dentry); + int (*ptrace) (struct task_struct *parent, struct task_struct *child); + int (*capget) (struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, kernel_cap_t *permitted); + int (*capset_check) (struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); + void (*capset_set) (struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); + int (*capable) (struct task_struct *tsk, int cap); + int (*acct) (struct file *file); + int (*sysctl) (struct ctl_table *table, int op); + int (*quotactl) (int cmds, int type, int id, struct super_block *sb); + int (*quota_on) (struct dentry *dentry); int (*syslog) (int type); int (*settime) (struct timespec *ts, struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); - int (*bprm_alloc_security) (struct linux_binprm * bprm); - void (*bprm_free_security) (struct linux_binprm * bprm); - void (*bprm_apply_creds) (struct linux_binprm * bprm, int unsafe); - void (*bprm_post_apply_creds) (struct linux_binprm * bprm); - int (*bprm_set_security) (struct linux_binprm * bprm); - int (*bprm_check_security) (struct linux_binprm * bprm); - int (*bprm_secureexec) (struct linux_binprm * bprm); - - int (*sb_alloc_security) (struct super_block * sb); - void (*sb_free_security) (struct super_block * sb); - int (*sb_copy_data)(char *orig, char *copy); + int (*bprm_alloc_security) (struct linux_binprm *bprm); + void (*bprm_free_security) (struct linux_binprm *bprm); + void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe); + void (*bprm_post_apply_creds) (struct linux_binprm *bprm); + int (*bprm_set_security) (struct linux_binprm *bprm); + int (*bprm_check_security) (struct linux_binprm *bprm); + int (*bprm_secureexec) (struct linux_binprm *bprm); + + int (*sb_alloc_security) (struct super_block *sb); + void (*sb_free_security) (struct super_block *sb); + int (*sb_copy_data) (char *orig, char *copy); int (*sb_kern_mount) (struct super_block *sb, void *data); int (*sb_statfs) (struct dentry *dentry); int (*sb_mount) (char *dev_name, struct path *path, char *type, unsigned long flags, void *data); - int (*sb_check_sb) (struct vfsmount * mnt, struct path *path); - int (*sb_umount) (struct vfsmount * mnt, int flags); - void (*sb_umount_close) (struct vfsmount * mnt); - void (*sb_umount_busy) (struct vfsmount * mnt); - void (*sb_post_remount) (struct vfsmount * mnt, + int (*sb_check_sb) (struct vfsmount *mnt, struct path *path); + int (*sb_umount) (struct vfsmount *mnt, int flags); + void (*sb_umount_close) (struct vfsmount *mnt); 
+ void (*sb_umount_busy) (struct vfsmount *mnt); + void (*sb_post_remount) (struct vfsmount *mnt, unsigned long flags, void *data); - void (*sb_post_addmount) (struct vfsmount * mnt, + void (*sb_post_addmount) (struct vfsmount *mnt, struct path *mountpoint); int (*sb_pivotroot) (struct path *old_path, struct path *new_path); @@ -1337,177 +1351,177 @@ struct security_operations { struct super_block *newsb); int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); - int (*inode_alloc_security) (struct inode *inode); + int (*inode_alloc_security) (struct inode *inode); void (*inode_free_security) (struct inode *inode); int (*inode_init_security) (struct inode *inode, struct inode *dir, char **name, void **value, size_t *len); int (*inode_create) (struct inode *dir, - struct dentry *dentry, int mode); + struct dentry *dentry, int mode); int (*inode_link) (struct dentry *old_dentry, - struct inode *dir, struct dentry *new_dentry); + struct inode *dir, struct dentry *new_dentry); int (*inode_unlink) (struct inode *dir, struct dentry *dentry); int (*inode_symlink) (struct inode *dir, - struct dentry *dentry, const char *old_name); + struct dentry *dentry, const char *old_name); int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode); int (*inode_rmdir) (struct inode *dir, struct dentry *dentry); int (*inode_mknod) (struct inode *dir, struct dentry *dentry, - int mode, dev_t dev); + int mode, dev_t dev); int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry); + struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink) (struct dentry *dentry); int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd); int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); - void (*inode_delete) (struct inode *inode); - int (*inode_setxattr) (struct dentry *dentry, char *name, void *value, - size_t size, int flags); - void (*inode_post_setxattr) (struct dentry *dentry, char *name, void *value, - size_t size, int flags); - int (*inode_getxattr) (struct dentry *dentry, char *name); + void (*inode_delete) (struct inode *inode); + int (*inode_setxattr) (struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + void (*inode_post_setxattr) (struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); + int (*inode_getxattr) (struct dentry *dentry, const char *name); int (*inode_listxattr) (struct dentry *dentry); - int (*inode_removexattr) (struct dentry *dentry, char *name); + int (*inode_removexattr) (struct dentry *dentry, const char *name); int (*inode_need_killpriv) (struct dentry *dentry); int (*inode_killpriv) (struct dentry *dentry); - int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc); - int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags); - int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size); - void (*inode_getsecid)(const struct inode *inode, u32 *secid); - - int (*file_permission) (struct file * file, int mask); - int (*file_alloc_security) (struct file * file); - void (*file_free_security) (struct file * file); - int (*file_ioctl) (struct file * file, unsigned int cmd, + int (*inode_getsecurity) (const struct inode *inode, const char *name, 
void **buffer, bool alloc); + int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags); + int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size); + void (*inode_getsecid) (const struct inode *inode, u32 *secid); + + int (*file_permission) (struct file *file, int mask); + int (*file_alloc_security) (struct file *file); + void (*file_free_security) (struct file *file); + int (*file_ioctl) (struct file *file, unsigned int cmd, unsigned long arg); - int (*file_mmap) (struct file * file, + int (*file_mmap) (struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags, unsigned long addr, unsigned long addr_only); - int (*file_mprotect) (struct vm_area_struct * vma, + int (*file_mprotect) (struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot); - int (*file_lock) (struct file * file, unsigned int cmd); - int (*file_fcntl) (struct file * file, unsigned int cmd, + int (*file_lock) (struct file *file, unsigned int cmd); + int (*file_fcntl) (struct file *file, unsigned int cmd, unsigned long arg); - int (*file_set_fowner) (struct file * file); - int (*file_send_sigiotask) (struct task_struct * tsk, - struct fown_struct * fown, int sig); - int (*file_receive) (struct file * file); - int (*dentry_open) (struct file *file); + int (*file_set_fowner) (struct file *file); + int (*file_send_sigiotask) (struct task_struct *tsk, + struct fown_struct *fown, int sig); + int (*file_receive) (struct file *file); + int (*dentry_open) (struct file *file); int (*task_create) (unsigned long clone_flags); - int (*task_alloc_security) (struct task_struct * p); - void (*task_free_security) (struct task_struct * p); + int (*task_alloc_security) (struct task_struct *p); + void (*task_free_security) (struct task_struct *p); int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ , uid_t old_euid, uid_t old_suid, int flags); int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags); - int (*task_setpgid) (struct task_struct * p, pid_t pgid); - int (*task_getpgid) (struct task_struct * p); - int (*task_getsid) (struct task_struct * p); - void (*task_getsecid) (struct task_struct * p, u32 * secid); + int (*task_setpgid) (struct task_struct *p, pid_t pgid); + int (*task_getpgid) (struct task_struct *p); + int (*task_getsid) (struct task_struct *p); + void (*task_getsecid) (struct task_struct *p, u32 *secid); int (*task_setgroups) (struct group_info *group_info); - int (*task_setnice) (struct task_struct * p, int nice); - int (*task_setioprio) (struct task_struct * p, int ioprio); - int (*task_getioprio) (struct task_struct * p); - int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim); - int (*task_setscheduler) (struct task_struct * p, int policy, - struct sched_param * lp); - int (*task_getscheduler) (struct task_struct * p); - int (*task_movememory) (struct task_struct * p); - int (*task_kill) (struct task_struct * p, - struct siginfo * info, int sig, u32 secid); - int (*task_wait) (struct task_struct * p); + int (*task_setnice) (struct task_struct *p, int nice); + int (*task_setioprio) (struct task_struct *p, int ioprio); + int (*task_getioprio) (struct task_struct *p); + int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim); + int (*task_setscheduler) (struct task_struct *p, int policy, + struct sched_param *lp); + int (*task_getscheduler) (struct task_struct *p); + int (*task_movememory) (struct 
task_struct *p); + int (*task_kill) (struct task_struct *p, + struct siginfo *info, int sig, u32 secid); + int (*task_wait) (struct task_struct *p); int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5); - void (*task_reparent_to_init) (struct task_struct * p); - void (*task_to_inode)(struct task_struct *p, struct inode *inode); + unsigned long arg5, long *rc_p); + void (*task_reparent_to_init) (struct task_struct *p); + void (*task_to_inode) (struct task_struct *p, struct inode *inode); - int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag); + int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid); - int (*msg_msg_alloc_security) (struct msg_msg * msg); - void (*msg_msg_free_security) (struct msg_msg * msg); - - int (*msg_queue_alloc_security) (struct msg_queue * msq); - void (*msg_queue_free_security) (struct msg_queue * msq); - int (*msg_queue_associate) (struct msg_queue * msq, int msqflg); - int (*msg_queue_msgctl) (struct msg_queue * msq, int cmd); - int (*msg_queue_msgsnd) (struct msg_queue * msq, - struct msg_msg * msg, int msqflg); - int (*msg_queue_msgrcv) (struct msg_queue * msq, - struct msg_msg * msg, - struct task_struct * target, + int (*msg_msg_alloc_security) (struct msg_msg *msg); + void (*msg_msg_free_security) (struct msg_msg *msg); + + int (*msg_queue_alloc_security) (struct msg_queue *msq); + void (*msg_queue_free_security) (struct msg_queue *msq); + int (*msg_queue_associate) (struct msg_queue *msq, int msqflg); + int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd); + int (*msg_queue_msgsnd) (struct msg_queue *msq, + struct msg_msg *msg, int msqflg); + int (*msg_queue_msgrcv) (struct msg_queue *msq, + struct msg_msg *msg, + struct task_struct *target, long type, int mode); - int (*shm_alloc_security) (struct shmid_kernel * shp); - void (*shm_free_security) (struct shmid_kernel * shp); - int (*shm_associate) (struct shmid_kernel * shp, int shmflg); - int (*shm_shmctl) (struct shmid_kernel * shp, int cmd); - int (*shm_shmat) (struct shmid_kernel * shp, + int (*shm_alloc_security) (struct shmid_kernel *shp); + void (*shm_free_security) (struct shmid_kernel *shp); + int (*shm_associate) (struct shmid_kernel *shp, int shmflg); + int (*shm_shmctl) (struct shmid_kernel *shp, int cmd); + int (*shm_shmat) (struct shmid_kernel *shp, char __user *shmaddr, int shmflg); - int (*sem_alloc_security) (struct sem_array * sma); - void (*sem_free_security) (struct sem_array * sma); - int (*sem_associate) (struct sem_array * sma, int semflg); - int (*sem_semctl) (struct sem_array * sma, int cmd); - int (*sem_semop) (struct sem_array * sma, - struct sembuf * sops, unsigned nsops, int alter); + int (*sem_alloc_security) (struct sem_array *sma); + void (*sem_free_security) (struct sem_array *sma); + int (*sem_associate) (struct sem_array *sma, int semflg); + int (*sem_semctl) (struct sem_array *sma, int cmd); + int (*sem_semop) (struct sem_array *sma, + struct sembuf *sops, unsigned nsops, int alter); - int (*netlink_send) (struct sock * sk, struct sk_buff * skb); - int (*netlink_recv) (struct sk_buff * skb, int cap); + int (*netlink_send) (struct sock *sk, struct sk_buff *skb); + int (*netlink_recv) (struct sk_buff *skb, int cap); /* allow module stacking */ int (*register_security) (const char *name, - struct security_operations *ops); + struct security_operations *ops); void (*d_instantiate) (struct dentry *dentry, struct inode *inode); - int 
(*getprocattr)(struct task_struct *p, char *name, char **value); - int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); - int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); - int (*secctx_to_secid)(char *secdata, u32 seclen, u32 *secid); - void (*release_secctx)(char *secdata, u32 seclen); + int (*getprocattr) (struct task_struct *p, char *name, char **value); + int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size); + int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen); + int (*secctx_to_secid) (char *secdata, u32 seclen, u32 *secid); + void (*release_secctx) (char *secdata, u32 seclen); #ifdef CONFIG_SECURITY_NETWORK - int (*unix_stream_connect) (struct socket * sock, - struct socket * other, struct sock * newsk); - int (*unix_may_send) (struct socket * sock, struct socket * other); + int (*unix_stream_connect) (struct socket *sock, + struct socket *other, struct sock *newsk); + int (*unix_may_send) (struct socket *sock, struct socket *other); int (*socket_create) (int family, int type, int protocol, int kern); - int (*socket_post_create) (struct socket * sock, int family, + int (*socket_post_create) (struct socket *sock, int family, int type, int protocol, int kern); - int (*socket_bind) (struct socket * sock, - struct sockaddr * address, int addrlen); - int (*socket_connect) (struct socket * sock, - struct sockaddr * address, int addrlen); - int (*socket_listen) (struct socket * sock, int backlog); - int (*socket_accept) (struct socket * sock, struct socket * newsock); - void (*socket_post_accept) (struct socket * sock, - struct socket * newsock); - int (*socket_sendmsg) (struct socket * sock, - struct msghdr * msg, int size); - int (*socket_recvmsg) (struct socket * sock, - struct msghdr * msg, int size, int flags); - int (*socket_getsockname) (struct socket * sock); - int (*socket_getpeername) (struct socket * sock); - int (*socket_getsockopt) (struct socket * sock, int level, int optname); - int (*socket_setsockopt) (struct socket * sock, int level, int optname); - int (*socket_shutdown) (struct socket * sock, int how); - int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); + int (*socket_bind) (struct socket *sock, + struct sockaddr *address, int addrlen); + int (*socket_connect) (struct socket *sock, + struct sockaddr *address, int addrlen); + int (*socket_listen) (struct socket *sock, int backlog); + int (*socket_accept) (struct socket *sock, struct socket *newsock); + void (*socket_post_accept) (struct socket *sock, + struct socket *newsock); + int (*socket_sendmsg) (struct socket *sock, + struct msghdr *msg, int size); + int (*socket_recvmsg) (struct socket *sock, + struct msghdr *msg, int size, int flags); + int (*socket_getsockname) (struct socket *sock); + int (*socket_getpeername) (struct socket *sock); + int (*socket_getsockopt) (struct socket *sock, int level, int optname); + int (*socket_setsockopt) (struct socket *sock, int level, int optname); + int (*socket_shutdown) (struct socket *sock, int how); + int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb); int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); void (*sk_free_security) (struct sock *sk); void (*sk_clone_security) (const struct sock *sk, struct sock *newsk); void (*sk_getsecid) (struct sock 
*sk, u32 *secid); - void (*sock_graft)(struct sock* sk, struct socket *parent); - int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb, - struct request_sock *req); - void (*inet_csk_clone)(struct sock *newsk, const struct request_sock *req); - void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb); - void (*req_classify_flow)(const struct request_sock *req, struct flowi *fl); + void (*sock_graft) (struct sock *sk, struct socket *parent); + int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb, + struct request_sock *req); + void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); + void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); + void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -1521,57 +1535,57 @@ struct security_operations { u32 secid); void (*xfrm_state_free_security) (struct xfrm_state *x); int (*xfrm_state_delete_security) (struct xfrm_state *x); - int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); - int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, - struct xfrm_policy *xp, struct flowi *fl); - int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); + int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); + int (*xfrm_state_pol_flow_match) (struct xfrm_state *x, + struct xfrm_policy *xp, + struct flowi *fl); + int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ /* key management security hooks */ #ifdef CONFIG_KEYS - int (*key_alloc)(struct key *key, struct task_struct *tsk, unsigned long flags); - void (*key_free)(struct key *key); - int (*key_permission)(key_ref_t key_ref, - struct task_struct *context, - key_perm_t perm); - + int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags); + void (*key_free) (struct key *key); + int (*key_permission) (key_ref_t key_ref, + struct task_struct *context, + key_perm_t perm); + int (*key_getsecurity)(struct key *key, char **_buffer); #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT - int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule); - int (*audit_rule_known)(struct audit_krule *krule); - int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, - struct audit_context *actx); - void (*audit_rule_free)(void *lsmrule); + int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule); + int (*audit_rule_known) (struct audit_krule *krule); + int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule, + struct audit_context *actx); + void (*audit_rule_free) (void *lsmrule); #endif /* CONFIG_AUDIT */ }; /* prototypes */ -extern int security_init (void); +extern int security_init(void); extern int security_module_enable(struct security_operations *ops); -extern int register_security (struct security_operations *ops); -extern int mod_reg_security (const char *name, struct security_operations *ops); +extern int register_security(struct security_operations *ops); +extern int mod_reg_security(const char *name, struct security_operations *ops); extern struct dentry *securityfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent); extern void securityfs_remove(struct dentry *dentry); - /* Security operations */ int 
security_ptrace(struct task_struct *parent, struct task_struct *child); int security_capget(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted); -void security_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +void security_capset_set(struct task_struct *target, + kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted); int security_capable(struct task_struct *tsk, int cap); int security_acct(struct file *file); int security_sysctl(struct ctl_table *table, int op); @@ -1594,7 +1608,7 @@ int security_sb_copy_data(char *orig, char *copy); int security_sb_kern_mount(struct super_block *sb, void *data); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(char *dev_name, struct path *path, - char *type, unsigned long flags, void *data); + char *type, unsigned long flags, void *data); int security_sb_check_sb(struct vfsmount *mnt, struct path *path); int security_sb_umount(struct vfsmount *mnt, int flags); void security_sb_umount_close(struct vfsmount *mnt); @@ -1619,25 +1633,25 @@ int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry); int security_inode_unlink(struct inode *dir, struct dentry *dentry); int security_inode_symlink(struct inode *dir, struct dentry *dentry, - const char *old_name); + const char *old_name); int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode); int security_inode_rmdir(struct inode *dir, struct dentry *dentry); int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev); int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry); + struct inode *new_dir, struct dentry *new_dentry); int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); void security_inode_delete(struct inode *inode); -int security_inode_setxattr(struct dentry *dentry, char *name, - void *value, size_t size, int flags); -void security_inode_post_setxattr(struct dentry *dentry, char *name, - void *value, size_t size, int flags); -int security_inode_getxattr(struct dentry *dentry, char *name); +int security_inode_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +void security_inode_post_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags); +int security_inode_getxattr(struct dentry *dentry, const char *name); int security_inode_listxattr(struct dentry *dentry); -int security_inode_removexattr(struct dentry *dentry, char *name); +int security_inode_removexattr(struct dentry *dentry, const char *name); int security_inode_need_killpriv(struct dentry *dentry); int security_inode_killpriv(struct dentry *dentry); int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); @@ -1652,12 +1666,12 @@ int security_file_mmap(struct file *file, 
unsigned long reqprot, unsigned long prot, unsigned long flags, unsigned long addr, unsigned long addr_only); int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, - unsigned long prot); + unsigned long prot); int security_file_lock(struct file *file, unsigned int cmd); int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); int security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, - struct fown_struct *fown, int sig); + struct fown_struct *fown, int sig); int security_file_receive(struct file *file); int security_dentry_open(struct file *file); int security_task_create(unsigned long clone_flags); @@ -1665,7 +1679,7 @@ int security_task_alloc(struct task_struct *p); void security_task_free(struct task_struct *p); int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags); + uid_t old_suid, int flags); int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); @@ -1684,7 +1698,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid); int security_task_wait(struct task_struct *p); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5); + unsigned long arg4, unsigned long arg5, long *rc_p); void security_task_reparent_to_init(struct task_struct *p); void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); @@ -1696,9 +1710,9 @@ void security_msg_queue_free(struct msg_queue *msq); int security_msg_queue_associate(struct msg_queue *msq, int msqflg); int security_msg_queue_msgctl(struct msg_queue *msq, int cmd); int security_msg_queue_msgsnd(struct msg_queue *msq, - struct msg_msg *msg, int msqflg); + struct msg_msg *msg, int msqflg); int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, - struct task_struct *target, long type, int mode); + struct task_struct *target, long type, int mode); int security_shm_alloc(struct shmid_kernel *shp); void security_shm_free(struct shmid_kernel *shp); int security_shm_associate(struct shmid_kernel *shp, int shmflg); @@ -1710,7 +1724,7 @@ int security_sem_associate(struct sem_array *sma, int semflg); int security_sem_semctl(struct sem_array *sma, int cmd); int security_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter); -void security_d_instantiate (struct dentry *dentry, struct inode *inode); +void security_d_instantiate(struct dentry *dentry, struct inode *inode); int security_getprocattr(struct task_struct *p, char *name, char **value); int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); @@ -1741,33 +1755,33 @@ static inline int security_init(void) return 0; } -static inline int security_ptrace (struct task_struct *parent, struct task_struct * child) +static inline int security_ptrace(struct task_struct *parent, struct task_struct *child) { - return cap_ptrace (parent, child); + return cap_ptrace(parent, child); } -static inline int security_capget (struct task_struct *target, +static inline int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) 
{ - return cap_capget (target, effective, inheritable, permitted); + return cap_capget(target, effective, inheritable, permitted); } -static inline int security_capset_check (struct task_struct *target, +static inline int security_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - return cap_capset_check (target, effective, inheritable, permitted); + return cap_capset_check(target, effective, inheritable, permitted); } -static inline void security_capset_set (struct task_struct *target, +static inline void security_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - cap_capset_set (target, effective, inheritable, permitted); + cap_capset_set(target, effective, inheritable, permitted); } static inline int security_capable(struct task_struct *tsk, int cap) @@ -1775,7 +1789,7 @@ static inline int security_capable(struct task_struct *tsk, int cap) return cap_capable(tsk, cap); } -static inline int security_acct (struct file *file) +static inline int security_acct(struct file *file) { return 0; } @@ -1785,13 +1799,13 @@ static inline int security_sysctl(struct ctl_table *table, int op) return 0; } -static inline int security_quotactl (int cmds, int type, int id, - struct super_block * sb) +static inline int security_quotactl(int cmds, int type, int id, + struct super_block *sb) { return 0; } -static inline int security_quota_on (struct dentry * dentry) +static inline int security_quota_on(struct dentry *dentry) { return 0; } @@ -1816,102 +1830,102 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) return cap_vm_enough_memory(mm, pages); } -static inline int security_bprm_alloc (struct linux_binprm *bprm) +static inline int security_bprm_alloc(struct linux_binprm *bprm) { return 0; } -static inline void security_bprm_free (struct linux_binprm *bprm) +static inline void security_bprm_free(struct linux_binprm *bprm) { } -static inline void security_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) -{ - cap_bprm_apply_creds (bprm, unsafe); +static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +{ + cap_bprm_apply_creds(bprm, unsafe); } -static inline void security_bprm_post_apply_creds (struct linux_binprm *bprm) +static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm) { return; } -static inline int security_bprm_set (struct linux_binprm *bprm) +static inline int security_bprm_set(struct linux_binprm *bprm) { - return cap_bprm_set_security (bprm); + return cap_bprm_set_security(bprm); } -static inline int security_bprm_check (struct linux_binprm *bprm) +static inline int security_bprm_check(struct linux_binprm *bprm) { return 0; } -static inline int security_bprm_secureexec (struct linux_binprm *bprm) +static inline int security_bprm_secureexec(struct linux_binprm *bprm) { return cap_bprm_secureexec(bprm); } -static inline int security_sb_alloc (struct super_block *sb) +static inline int security_sb_alloc(struct super_block *sb) { return 0; } -static inline void security_sb_free (struct super_block *sb) +static inline void security_sb_free(struct super_block *sb) { } -static inline int security_sb_copy_data (char *orig, char *copy) +static inline int security_sb_copy_data(char *orig, char *copy) { return 0; } -static inline int security_sb_kern_mount (struct super_block *sb, void *data) +static inline int security_sb_kern_mount(struct super_block *sb, void *data) { return 
0; } -static inline int security_sb_statfs (struct dentry *dentry) +static inline int security_sb_statfs(struct dentry *dentry) { return 0; } -static inline int security_sb_mount (char *dev_name, struct path *path, +static inline int security_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data) { return 0; } -static inline int security_sb_check_sb (struct vfsmount *mnt, - struct path *path) +static inline int security_sb_check_sb(struct vfsmount *mnt, + struct path *path) { return 0; } -static inline int security_sb_umount (struct vfsmount *mnt, int flags) +static inline int security_sb_umount(struct vfsmount *mnt, int flags) { return 0; } -static inline void security_sb_umount_close (struct vfsmount *mnt) +static inline void security_sb_umount_close(struct vfsmount *mnt) { } -static inline void security_sb_umount_busy (struct vfsmount *mnt) +static inline void security_sb_umount_busy(struct vfsmount *mnt) { } -static inline void security_sb_post_remount (struct vfsmount *mnt, +static inline void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data) { } -static inline void security_sb_post_addmount (struct vfsmount *mnt, - struct path *mountpoint) +static inline void security_sb_post_addmount(struct vfsmount *mnt, + struct path *mountpoint) { } -static inline int security_sb_pivotroot (struct path *old_path, - struct path *new_path) +static inline int security_sb_pivotroot(struct path *old_path, + struct path *new_path) { return 0; } -static inline void security_sb_post_pivotroot (struct path *old_path, - struct path *new_path) +static inline void security_sb_post_pivotroot(struct path *old_path, + struct path *new_path) { } static inline int security_sb_get_mnt_opts(const struct super_block *sb, struct security_mnt_opts *opts) @@ -1935,15 +1949,15 @@ static inline int security_sb_parse_opts_str(char *options, struct security_mnt_ return 0; } -static inline int security_inode_alloc (struct inode *inode) +static inline int security_inode_alloc(struct inode *inode) { return 0; } -static inline void security_inode_free (struct inode *inode) +static inline void security_inode_free(struct inode *inode) { } -static inline int security_inode_init_security (struct inode *inode, +static inline int security_inode_init_security(struct inode *inode, struct inode *dir, char **name, void **value, @@ -1951,55 +1965,55 @@ static inline int security_inode_init_security (struct inode *inode, { return -EOPNOTSUPP; } - -static inline int security_inode_create (struct inode *dir, + +static inline int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) { return 0; } -static inline int security_inode_link (struct dentry *old_dentry, +static inline int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { return 0; } -static inline int security_inode_unlink (struct inode *dir, +static inline int security_inode_unlink(struct inode *dir, struct dentry *dentry) { return 0; } -static inline int security_inode_symlink (struct inode *dir, +static inline int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name) { return 0; } -static inline int security_inode_mkdir (struct inode *dir, +static inline int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode) { return 0; } -static inline int security_inode_rmdir (struct inode *dir, +static inline int security_inode_rmdir(struct inode *dir, struct dentry *dentry) { return 0; } -static inline int 
security_inode_mknod (struct inode *dir, +static inline int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) { return 0; } -static inline int security_inode_rename (struct inode *old_dir, +static inline int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) @@ -2007,59 +2021,61 @@ static inline int security_inode_rename (struct inode *old_dir, return 0; } -static inline int security_inode_readlink (struct dentry *dentry) +static inline int security_inode_readlink(struct dentry *dentry) { return 0; } -static inline int security_inode_follow_link (struct dentry *dentry, +static inline int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd) { return 0; } -static inline int security_inode_permission (struct inode *inode, int mask, +static inline int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd) { return 0; } -static inline int security_inode_setattr (struct dentry *dentry, +static inline int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { return 0; } -static inline int security_inode_getattr (struct vfsmount *mnt, +static inline int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { return 0; } -static inline void security_inode_delete (struct inode *inode) +static inline void security_inode_delete(struct inode *inode) { } -static inline int security_inode_setxattr (struct dentry *dentry, char *name, - void *value, size_t size, int flags) +static inline int security_inode_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) { return cap_inode_setxattr(dentry, name, value, size, flags); } -static inline void security_inode_post_setxattr (struct dentry *dentry, char *name, - void *value, size_t size, int flags) +static inline void security_inode_post_setxattr(struct dentry *dentry, + const char *name, const void *value, size_t size, int flags) { } -static inline int security_inode_getxattr (struct dentry *dentry, char *name) +static inline int security_inode_getxattr(struct dentry *dentry, + const char *name) { return 0; } -static inline int security_inode_listxattr (struct dentry *dentry) +static inline int security_inode_listxattr(struct dentry *dentry) { return 0; } -static inline int security_inode_removexattr (struct dentry *dentry, char *name) +static inline int security_inode_removexattr(struct dentry *dentry, + const char *name) { return cap_inode_removexattr(dentry, name); } @@ -2094,198 +2110,198 @@ static inline void security_inode_getsecid(const struct inode *inode, u32 *secid *secid = 0; } -static inline int security_file_permission (struct file *file, int mask) +static inline int security_file_permission(struct file *file, int mask) { return 0; } -static inline int security_file_alloc (struct file *file) +static inline int security_file_alloc(struct file *file) { return 0; } -static inline void security_file_free (struct file *file) +static inline void security_file_free(struct file *file) { } -static inline int security_file_ioctl (struct file *file, unsigned int cmd, - unsigned long arg) +static inline int security_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { return 0; } -static inline int security_file_mmap (struct file *file, unsigned long reqprot, - unsigned long prot, - unsigned long flags, - unsigned long addr, - unsigned long addr_only) +static inline int security_file_mmap(struct file *file, unsigned long 
reqprot, + unsigned long prot, + unsigned long flags, + unsigned long addr, + unsigned long addr_only) { return 0; } -static inline int security_file_mprotect (struct vm_area_struct *vma, - unsigned long reqprot, - unsigned long prot) +static inline int security_file_mprotect(struct vm_area_struct *vma, + unsigned long reqprot, + unsigned long prot) { return 0; } -static inline int security_file_lock (struct file *file, unsigned int cmd) +static inline int security_file_lock(struct file *file, unsigned int cmd) { return 0; } -static inline int security_file_fcntl (struct file *file, unsigned int cmd, - unsigned long arg) +static inline int security_file_fcntl(struct file *file, unsigned int cmd, + unsigned long arg) { return 0; } -static inline int security_file_set_fowner (struct file *file) +static inline int security_file_set_fowner(struct file *file) { return 0; } -static inline int security_file_send_sigiotask (struct task_struct *tsk, - struct fown_struct *fown, - int sig) +static inline int security_file_send_sigiotask(struct task_struct *tsk, + struct fown_struct *fown, + int sig) { return 0; } -static inline int security_file_receive (struct file *file) +static inline int security_file_receive(struct file *file) { return 0; } -static inline int security_dentry_open (struct file *file) +static inline int security_dentry_open(struct file *file) { return 0; } -static inline int security_task_create (unsigned long clone_flags) +static inline int security_task_create(unsigned long clone_flags) { return 0; } -static inline int security_task_alloc (struct task_struct *p) +static inline int security_task_alloc(struct task_struct *p) { return 0; } -static inline void security_task_free (struct task_struct *p) +static inline void security_task_free(struct task_struct *p) { } -static inline int security_task_setuid (uid_t id0, uid_t id1, uid_t id2, - int flags) +static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, + int flags) { return 0; } -static inline int security_task_post_setuid (uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags) +static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, + uid_t old_suid, int flags) { - return cap_task_post_setuid (old_ruid, old_euid, old_suid, flags); + return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags); } -static inline int security_task_setgid (gid_t id0, gid_t id1, gid_t id2, - int flags) +static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, + int flags) { return 0; } -static inline int security_task_setpgid (struct task_struct *p, pid_t pgid) +static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; } -static inline int security_task_getpgid (struct task_struct *p) +static inline int security_task_getpgid(struct task_struct *p) { return 0; } -static inline int security_task_getsid (struct task_struct *p) +static inline int security_task_getsid(struct task_struct *p) { return 0; } -static inline void security_task_getsecid (struct task_struct *p, u32 *secid) +static inline void security_task_getsecid(struct task_struct *p, u32 *secid) { *secid = 0; } -static inline int security_task_setgroups (struct group_info *group_info) +static inline int security_task_setgroups(struct group_info *group_info) { return 0; } -static inline int security_task_setnice (struct task_struct *p, int nice) +static inline int security_task_setnice(struct task_struct *p, int nice) { return cap_task_setnice(p, nice); } -static inline int security_task_setioprio 
(struct task_struct *p, int ioprio) +static inline int security_task_setioprio(struct task_struct *p, int ioprio) { return cap_task_setioprio(p, ioprio); } -static inline int security_task_getioprio (struct task_struct *p) +static inline int security_task_getioprio(struct task_struct *p) { return 0; } -static inline int security_task_setrlimit (unsigned int resource, - struct rlimit *new_rlim) +static inline int security_task_setrlimit(unsigned int resource, + struct rlimit *new_rlim) { return 0; } -static inline int security_task_setscheduler (struct task_struct *p, - int policy, - struct sched_param *lp) +static inline int security_task_setscheduler(struct task_struct *p, + int policy, + struct sched_param *lp) { return cap_task_setscheduler(p, policy, lp); } -static inline int security_task_getscheduler (struct task_struct *p) +static inline int security_task_getscheduler(struct task_struct *p) { return 0; } -static inline int security_task_movememory (struct task_struct *p) +static inline int security_task_movememory(struct task_struct *p) { return 0; } -static inline int security_task_kill (struct task_struct *p, - struct siginfo *info, int sig, - u32 secid) +static inline int security_task_kill(struct task_struct *p, + struct siginfo *info, int sig, + u32 secid) { return 0; } -static inline int security_task_wait (struct task_struct *p) +static inline int security_task_wait(struct task_struct *p) { return 0; } -static inline int security_task_prctl (int option, unsigned long arg2, - unsigned long arg3, - unsigned long arg4, - unsigned long arg5) +static inline int security_task_prctl(int option, unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, long *rc_p) { - return 0; + return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p); } -static inline void security_task_reparent_to_init (struct task_struct *p) +static inline void security_task_reparent_to_init(struct task_struct *p) { - cap_task_reparent_to_init (p); + cap_task_reparent_to_init(p); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) { } -static inline int security_ipc_permission (struct kern_ipc_perm *ipcp, - short flag) +static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, + short flag) { return 0; } @@ -2295,98 +2311,98 @@ static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) *secid = 0; } -static inline int security_msg_msg_alloc (struct msg_msg * msg) +static inline int security_msg_msg_alloc(struct msg_msg *msg) { return 0; } -static inline void security_msg_msg_free (struct msg_msg * msg) +static inline void security_msg_msg_free(struct msg_msg *msg) { } -static inline int security_msg_queue_alloc (struct msg_queue *msq) +static inline int security_msg_queue_alloc(struct msg_queue *msq) { return 0; } -static inline void security_msg_queue_free (struct msg_queue *msq) +static inline void security_msg_queue_free(struct msg_queue *msq) { } -static inline int security_msg_queue_associate (struct msg_queue * msq, - int msqflg) +static inline int security_msg_queue_associate(struct msg_queue *msq, + int msqflg) { return 0; } -static inline int security_msg_queue_msgctl (struct msg_queue * msq, int cmd) +static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd) { return 0; } -static inline int security_msg_queue_msgsnd (struct msg_queue * msq, - struct msg_msg * msg, int msqflg) +static inline int security_msg_queue_msgsnd(struct msg_queue *msq, + struct msg_msg *msg, int msqflg) { return
0; } -static inline int security_msg_queue_msgrcv (struct msg_queue * msq, - struct msg_msg * msg, - struct task_struct * target, - long type, int mode) +static inline int security_msg_queue_msgrcv(struct msg_queue *msq, + struct msg_msg *msg, + struct task_struct *target, + long type, int mode) { return 0; } -static inline int security_shm_alloc (struct shmid_kernel *shp) +static inline int security_shm_alloc(struct shmid_kernel *shp) { return 0; } -static inline void security_shm_free (struct shmid_kernel *shp) +static inline void security_shm_free(struct shmid_kernel *shp) { } -static inline int security_shm_associate (struct shmid_kernel * shp, - int shmflg) +static inline int security_shm_associate(struct shmid_kernel *shp, + int shmflg) { return 0; } -static inline int security_shm_shmctl (struct shmid_kernel * shp, int cmd) +static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd) { return 0; } -static inline int security_shm_shmat (struct shmid_kernel * shp, - char __user *shmaddr, int shmflg) +static inline int security_shm_shmat(struct shmid_kernel *shp, + char __user *shmaddr, int shmflg) { return 0; } -static inline int security_sem_alloc (struct sem_array *sma) +static inline int security_sem_alloc(struct sem_array *sma) { return 0; } -static inline void security_sem_free (struct sem_array *sma) +static inline void security_sem_free(struct sem_array *sma) { } -static inline int security_sem_associate (struct sem_array * sma, int semflg) +static inline int security_sem_associate(struct sem_array *sma, int semflg) { return 0; } -static inline int security_sem_semctl (struct sem_array * sma, int cmd) +static inline int security_sem_semctl(struct sem_array *sma, int cmd) { return 0; } -static inline int security_sem_semop (struct sem_array * sma, - struct sembuf * sops, unsigned nsops, - int alter) +static inline int security_sem_semop(struct sem_array *sma, + struct sembuf *sops, unsigned nsops, + int alter) { return 0; } -static inline void security_d_instantiate (struct dentry *dentry, struct inode *inode) +static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) { } static inline int security_getprocattr(struct task_struct *p, char *name, char **value) @@ -2399,14 +2415,14 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void * return -EINVAL; } -static inline int security_netlink_send (struct sock *sk, struct sk_buff *skb) +static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) { - return cap_netlink_send (sk, skb); + return cap_netlink_send(sk, skb); } -static inline int security_netlink_recv (struct sk_buff *skb, int cap) +static inline int security_netlink_recv(struct sk_buff *skb, int cap) { - return cap_netlink_recv (skb, cap); + return cap_netlink_recv(skb, cap); } static inline struct dentry *securityfs_create_dir(const char *name, @@ -2484,26 +2500,26 @@ void security_inet_conn_established(struct sock *sk, struct sk_buff *skb); #else /* CONFIG_SECURITY_NETWORK */ -static inline int security_unix_stream_connect(struct socket * sock, - struct socket * other, - struct sock * newsk) +static inline int security_unix_stream_connect(struct socket *sock, + struct socket *other, + struct sock *newsk) { return 0; } -static inline int security_unix_may_send(struct socket * sock, - struct socket * other) +static inline int security_unix_may_send(struct socket *sock, + struct socket *other) { return 0; } -static inline int security_socket_create (int family, int type, - int protocol, 
int kern) +static inline int security_socket_create(int family, int type, + int protocol, int kern) { return 0; } -static inline int security_socket_post_create(struct socket * sock, +static inline int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) @@ -2511,77 +2527,77 @@ static inline int security_socket_post_create(struct socket * sock, return 0; } -static inline int security_socket_bind(struct socket * sock, - struct sockaddr * address, +static inline int security_socket_bind(struct socket *sock, + struct sockaddr *address, int addrlen) { return 0; } -static inline int security_socket_connect(struct socket * sock, - struct sockaddr * address, +static inline int security_socket_connect(struct socket *sock, + struct sockaddr *address, int addrlen) { return 0; } -static inline int security_socket_listen(struct socket * sock, int backlog) +static inline int security_socket_listen(struct socket *sock, int backlog) { return 0; } -static inline int security_socket_accept(struct socket * sock, - struct socket * newsock) +static inline int security_socket_accept(struct socket *sock, + struct socket *newsock) { return 0; } -static inline void security_socket_post_accept(struct socket * sock, - struct socket * newsock) +static inline void security_socket_post_accept(struct socket *sock, + struct socket *newsock) { } -static inline int security_socket_sendmsg(struct socket * sock, - struct msghdr * msg, int size) +static inline int security_socket_sendmsg(struct socket *sock, + struct msghdr *msg, int size) { return 0; } -static inline int security_socket_recvmsg(struct socket * sock, - struct msghdr * msg, int size, +static inline int security_socket_recvmsg(struct socket *sock, + struct msghdr *msg, int size, int flags) { return 0; } -static inline int security_socket_getsockname(struct socket * sock) +static inline int security_socket_getsockname(struct socket *sock) { return 0; } -static inline int security_socket_getpeername(struct socket * sock) +static inline int security_socket_getpeername(struct socket *sock) { return 0; } -static inline int security_socket_getsockopt(struct socket * sock, +static inline int security_socket_getsockopt(struct socket *sock, int level, int optname) { return 0; } -static inline int security_socket_setsockopt(struct socket * sock, +static inline int security_socket_setsockopt(struct socket *sock, int level, int optname) { return 0; } -static inline int security_socket_shutdown(struct socket * sock, int how) +static inline int security_socket_shutdown(struct socket *sock, int how) { return 0; } -static inline int security_sock_rcv_skb (struct sock * sk, - struct sk_buff * skb) +static inline int security_sock_rcv_skb(struct sock *sk, + struct sk_buff *skb) { return 0; } @@ -2618,7 +2634,7 @@ static inline void security_req_classify_flow(const struct request_sock *req, st { } -static inline void security_sock_graft(struct sock* sk, struct socket *parent) +static inline void security_sock_graft(struct sock *sk, struct socket *parent) { } @@ -2727,6 +2743,7 @@ int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long f void security_key_free(struct key *key); int security_key_permission(key_ref_t key_ref, struct task_struct *context, key_perm_t perm); +int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -2748,6 +2765,12 @@ static inline int security_key_permission(key_ref_t key_ref, return 0; } +static inline int security_key_getsecurity(struct key *key, char **_buffer) +{ + 
*_buffer = NULL; + return 0; +} + #endif #endif /* CONFIG_KEYS */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 00b65c0a82c..3d37c94abbc 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -46,6 +46,7 @@ enum { PLAT8250_DEV_HUB6, PLAT8250_DEV_MCA, PLAT8250_DEV_AU1X00, + PLAT8250_DEV_SM501, }; /* diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 8d5fb36ea04..f2d12d5a21b 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -34,8 +34,7 @@ struct shmem_sb_info { uid_t uid; /* Mount uid for root directory */ gid_t gid; /* Mount gid for root directory */ mode_t mode; /* Mount mode for root directory */ - int policy; /* Default NUMA memory alloc policy */ - nodemask_t policy_nodes; /* nodemask for preferred and bind */ + struct mempolicy *mpol; /* default memory policy for mappings */ }; static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 79d59c937fa..71e43a12ebb 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -29,6 +29,7 @@ enum stat_item { DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */ DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ + ORDER_FALLBACK, /* Number of times fallback was necessary */ NR_SLUB_STAT_ITEMS }; struct kmem_cache_cpu { @@ -48,11 +49,21 @@ struct kmem_cache_node { struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; + atomic_long_t total_objects; struct list_head full; #endif }; /* + * Word size structure that can be atomically updated or read and that + * contains both the order and the number of objects that a slab of the + * given order would contain. + */ +struct kmem_cache_order_objects { + unsigned long x; +}; + +/* * Slab cache management. */ struct kmem_cache { @@ -61,7 +72,7 @@ struct kmem_cache { int size; /* The size of an object including meta data */ int objsize; /* The size of an object without meta data */ int offset; /* Free pointer offset. 
*/ - int order; /* Current preferred allocation order */ + struct kmem_cache_order_objects oo; /* * Avoid an extra cache line for UP, SMP and for the node local to @@ -70,7 +81,8 @@ struct kmem_cache { struct kmem_cache_node local_node; /* Allocation and freeing of slabs */ - int objects; /* Number of objects in slab */ + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ void (*ctor)(struct kmem_cache *, void *); diff --git a/include/linux/smb.h b/include/linux/smb.h index f098dff93f6..caa43b2370c 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/magic.h> +#include <linux/time.h> enum smb_protocol { SMB_PROTOCOL_NONE, diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 1d7d4c5797e..a6977423baf 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -12,11 +12,22 @@ #include <asm/errno.h> #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) +extern void pm_set_vt_switch(int); extern int pm_prepare_console(void); extern void pm_restore_console(void); #else -static inline int pm_prepare_console(void) { return 0; } -static inline void pm_restore_console(void) {} +static inline void pm_set_vt_switch(int do_switch) +{ +} + +static inline int pm_prepare_console(void) +{ + return 0; +} + +static inline void pm_restore_console(void) +{ +} #endif typedef int __bitwise suspend_state_t; diff --git a/include/linux/swap.h b/include/linux/swap.h index 878459ae045..0b3377650c8 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -177,11 +177,11 @@ extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); extern int lru_add_drain_all(void); -extern int rotate_reclaimable_page(struct page *page); +extern void rotate_reclaimable_page(struct page *page); extern void swap_setup(void); /* linux/mm/vmscan.c */ -extern unsigned long try_to_free_pages(struct zone **zones, int order, +extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, gfp_t gfp_mask); diff --git a/include/linux/synclink.h b/include/linux/synclink.h index 5562fbf7209..45f6bc82d31 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h @@ -13,10 +13,6 @@ #define _SYNCLINK_H_ #define SYNCLINK_H_VERSION 3.6 -#define BOOLEAN int -#define TRUE 1 -#define FALSE 0 - #define BIT0 0x0001 #define BIT1 0x0002 #define BIT2 0x0004 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8df6d1382ac..0522f368f9d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -240,26 +240,28 @@ asmlinkage long sys_truncate64(const char __user *path, loff_t length); asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); #endif -asmlinkage long sys_setxattr(char __user *path, char __user *name, - void __user *value, size_t size, int flags); -asmlinkage long sys_lsetxattr(char __user *path, char __user *name, - void __user *value, size_t size, int flags); -asmlinkage long sys_fsetxattr(int fd, char __user *name, void __user *value, - size_t size, int flags); -asmlinkage ssize_t sys_getxattr(char __user *path, char __user *name, +asmlinkage long sys_setxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long 
sys_lsetxattr(const char __user *path, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage long sys_fsetxattr(int fd, const char __user *name, + const void __user *value, size_t size, int flags); +asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name, void __user *value, size_t size); -asmlinkage ssize_t sys_lgetxattr(char __user *path, char __user *name, +asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name, void __user *value, size_t size); -asmlinkage ssize_t sys_fgetxattr(int fd, char __user *name, +asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size); -asmlinkage ssize_t sys_listxattr(char __user *path, char __user *list, +asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list, size_t size); -asmlinkage ssize_t sys_llistxattr(char __user *path, char __user *list, +asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list, size_t size); asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size); -asmlinkage long sys_removexattr(char __user *path, char __user *name); -asmlinkage long sys_lremovexattr(char __user *path, char __user *name); -asmlinkage long sys_fremovexattr(int fd, char __user *name); +asmlinkage long sys_removexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_lremovexattr(const char __user *path, + const char __user *name); +asmlinkage long sys_fremovexattr(int fd, const char __user *name); asmlinkage unsigned long sys_brk(unsigned long brk); asmlinkage long sys_mprotect(unsigned long start, size_t len, diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 571f01d20a8..24141b4d1a1 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -945,11 +945,14 @@ enum /* For the /proc/sys support */ struct ctl_table; struct nsproxy; +struct ctl_table_root; + extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, struct ctl_table_header *prev); extern void sysctl_head_finish(struct ctl_table_header *prev); -extern int sysctl_perm(struct ctl_table *table, int op); +extern int sysctl_perm(struct ctl_table_root *root, + struct ctl_table *table, int op); typedef struct ctl_table ctl_table; @@ -981,11 +984,6 @@ extern int do_sysctl (int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen); -extern int do_sysctl_strategy (struct ctl_table *table, - int __user *name, int nlen, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen); - extern ctl_handler sysctl_data; extern ctl_handler sysctl_string; extern ctl_handler sysctl_intvec; @@ -1054,6 +1052,8 @@ struct ctl_table_root { struct list_head header_list; struct list_head *(*lookup)(struct ctl_table_root *root, struct nsproxy *namespaces); + int (*permissions)(struct ctl_table_root *root, + struct nsproxy *namespaces, struct ctl_table *table); }; /* struct ctl_table_header is used to maintain dynamic lists of @@ -1085,8 +1085,6 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table); -#else /* __KERNEL__ */ - #endif /* __KERNEL__ */ #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 03378e3515b..add3c5a4082 
100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -32,7 +32,7 @@ struct attribute { struct attribute_group { const char *name; - int (*is_visible)(struct kobject *, + mode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; }; @@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name); int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); +int sysfs_update_group(struct kobject *kobj, + const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp); int sysfs_add_file_to_group(struct kobject *kobj, diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h index e0248631e46..96411306eec 100644 --- a/include/linux/sysv_fs.h +++ b/include/linux/sysv_fs.h @@ -1,11 +1,7 @@ #ifndef _LINUX_SYSV_FS_H #define _LINUX_SYSV_FS_H -#if defined(__GNUC__) -# define __packed2__ __attribute__((packed, aligned(2))) -#else ->> I want to scream! << -#endif +#define __packed2__ __attribute__((packed, aligned(2))) #ifndef __KERNEL__ diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h new file mode 100644 index 00000000000..99c1b4d20b0 --- /dev/null +++ b/include/linux/unaligned/access_ok.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_UNALIGNED_ACCESS_OK_H +#define _LINUX_UNALIGNED_ACCESS_OK_H + +#include <linux/kernel.h> +#include <asm/byteorder.h> + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpup((__le16 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpup((__le32 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpup((__le64 *)p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpup((__be16 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpup((__be32 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpup((__be64 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + *((__le16 *)p) = cpu_to_le16(val); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + *((__le32 *)p) = cpu_to_le32(val); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + *((__le64 *)p) = cpu_to_le64(val); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + *((__be16 *)p) = cpu_to_be16(val); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + *((__be32 *)p) = cpu_to_be32(val); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + *((__be64 *)p) = cpu_to_be64(val); +} + +#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h new file mode 100644 index 00000000000..46dd12c5709 --- /dev/null +++ b/include/linux/unaligned/be_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H +#define _LINUX_UNALIGNED_BE_BYTESHIFT_H + +#include <linux/kernel.h> + +static inline u16 __get_unaligned_be16(const u8 *p) +{ + return p[0] << 8 | p[1]; +} + +static inline u32 __get_unaligned_be32(const u8 *p) +{ + return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; +} + +static inline u64 __get_unaligned_be64(const u8 *p) +{ + return (u64)__get_unaligned_be32(p) << 32 | + __get_unaligned_be32(p + 4); +} + +static inline void __put_unaligned_be16(u16 val, u8 *p) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_unaligned_be32(u32 val, u8 *p) +{ + __put_unaligned_be16(val >> 16, p); + 
__put_unaligned_be16(val, p + 2); +} + +static inline void __put_unaligned_be64(u64 val, u8 *p) +{ + __put_unaligned_be32(val >> 32, p); + __put_unaligned_be32(val, p + 4); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_be16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_be32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_be64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_be16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_be32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_be64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h new file mode 100644 index 00000000000..c2a76c5c9ed --- /dev/null +++ b/include/linux/unaligned/be_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H +#define _LINUX_UNALIGNED_BE_MEMMOVE_H + +#include <linux/unaligned/memmove.h> + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h new file mode 100644 index 00000000000..132415836c5 --- /dev/null +++ b/include/linux/unaligned/be_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_BE_STRUCT_H +#define _LINUX_UNALIGNED_BE_STRUCT_H + +#include <linux/unaligned/packed_struct.h> + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return __get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h new file mode 100644 index 00000000000..02d97ff3df7 --- /dev/null +++ b/include/linux/unaligned/generic.h @@ -0,0 +1,68 @@ +#ifndef _LINUX_UNALIGNED_GENERIC_H +#define _LINUX_UNALIGNED_GENERIC_H + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ 
+ __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h new file mode 100644 index 00000000000..59777e951ba --- /dev/null +++ b/include/linux/unaligned/le_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H +#define _LINUX_UNALIGNED_LE_BYTESHIFT_H + +#include <linux/kernel.h> + +static inline u16 __get_unaligned_le16(const u8 *p) +{ + return p[0] | p[1] << 8; +} + +static inline u32 __get_unaligned_le32(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; +} + +static inline u64 __get_unaligned_le64(const u8 *p) +{ + return (u64)__get_unaligned_le32(p + 4) << 32 | + __get_unaligned_le32(p); +} + +static inline void __put_unaligned_le16(u16 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_unaligned_le32(u32 val, u8 *p) +{ + __put_unaligned_le16(val >> 16, p + 2); + __put_unaligned_le16(val, p); +} + +static inline void __put_unaligned_le64(u64 val, u8 *p) +{ + __put_unaligned_le32(val >> 32, p + 4); + __put_unaligned_le32(val, p); +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_le16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_le32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_le64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_le16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_le32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_le64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h new file mode 100644 index 00000000000..269849bee4e --- /dev/null +++ b/include/linux/unaligned/le_memmove.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H +#define _LINUX_UNALIGNED_LE_MEMMOVE_H + +#include <linux/unaligned/memmove.h> + +static inline u16 
get_unaligned_le16(const void *p) +{ + return __get_unaligned_memmove16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_memmove32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_memmove64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_memmove16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_memmove32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_memmove64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h new file mode 100644 index 00000000000..088c4572faa --- /dev/null +++ b/include/linux/unaligned/le_struct.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_UNALIGNED_LE_STRUCT_H +#define _LINUX_UNALIGNED_LE_STRUCT_H + +#include <linux/unaligned/packed_struct.h> + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_cpu16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_cpu32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_cpu64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_cpu16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_cpu32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_cpu64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h new file mode 100644 index 00000000000..eeb5a779a4f --- /dev/null +++ b/include/linux/unaligned/memmove.h @@ -0,0 +1,45 @@ +#ifndef _LINUX_UNALIGNED_MEMMOVE_H +#define _LINUX_UNALIGNED_MEMMOVE_H + +#include <linux/kernel.h> +#include <linux/string.h> + +/* Use memmove here, so gcc does not insert a __builtin_memcpy. 
*/ + +static inline u16 __get_unaligned_memmove16(const void *p) +{ + u16 tmp; + memmove(&tmp, p, 2); + return tmp; +} + +static inline u32 __get_unaligned_memmove32(const void *p) +{ + u32 tmp; + memmove(&tmp, p, 4); + return tmp; +} + +static inline u64 __get_unaligned_memmove64(const void *p) +{ + u64 tmp; + memmove(&tmp, p, 8); + return tmp; +} + +static inline void __put_unaligned_memmove16(u16 val, void *p) +{ + memmove(p, &val, 2); +} + +static inline void __put_unaligned_memmove32(u32 val, void *p) +{ + memmove(p, &val, 4); +} + +static inline void __put_unaligned_memmove64(u64 val, void *p) +{ + memmove(p, &val, 8); +} + +#endif /* _LINUX_UNALIGNED_MEMMOVE_H */ diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h new file mode 100644 index 00000000000..2498bb9fe00 --- /dev/null +++ b/include/linux/unaligned/packed_struct.h @@ -0,0 +1,46 @@ +#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +#include <linux/kernel.h> + +struct __una_u16 { u16 x __attribute__((packed)); }; +struct __una_u32 { u32 x __attribute__((packed)); }; +struct __una_u64 { u64 x __attribute__((packed)); }; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ce8e7da0580..364789aae9f 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -31,6 +31,7 @@ struct vm_struct { struct page **pages; unsigned int nr_pages; unsigned long phys_addr; + void *caller; }; /* @@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area) } extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); +extern struct vm_struct *get_vm_area_caller(unsigned long size, + unsigned long flags, void *caller); extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); extern struct vm_struct *get_vm_area_node(unsigned long size, @@ -87,4 +90,6 @@ extern void free_vm_area(struct vm_struct *area); extern rwlock_t vmlist_lock; extern struct vm_struct *vmlist; +extern const struct seq_operations vmalloc_op; + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 9f1b4b46151..e83b69346d2 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -25,6 +25,7 @@ #define HIGHMEM_ZONE(xx) #endif + #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, @@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGSCAN_DIRECT), PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, PAGEOUTRUN, 
ALLOCSTALL, PGROTATED, +#ifdef CONFIG_HUGETLB_PAGE + HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, +#endif NR_VM_EVENT_ITEMS }; @@ -174,7 +178,7 @@ static inline unsigned long node_page_state(int node, zone_page_state(&zones[ZONE_MOVABLE], item); } -extern void zone_statistics(struct zonelist *, struct zone *); +extern void zone_statistics(struct zone *, struct zone *); #else diff --git a/include/linux/xattr.h b/include/linux/xattr.h index df6b95d2218..d131e352cfe 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -47,10 +47,10 @@ struct xattr_handler { }; ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); -ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t); +ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); -int vfs_setxattr(struct dentry *, char *, void *, size_t, int); -int vfs_removexattr(struct dentry *, char *); +int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); +int vfs_removexattr(struct dentry *, const char *); ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); diff --git a/include/net/compat.h b/include/net/compat.h index 406db242f73..05fa5d0254a 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,4 +40,7 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); +extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int, + int (*)(struct sock *, int, int, char __user *, int)); + #endif /* NET_COMPAT_H */ diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 22298423cf0..9ee0d2e51b1 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -62,7 +62,7 @@ struct ib_umem_chunk { #ifdef CONFIG_INFINIBAND_USER_MEM struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, - size_t size, int access); + size_t size, int access, int dmasync); void ib_umem_release(struct ib_umem *umem); int ib_umem_page_count(struct ib_umem *umem); @@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem); static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, size_t size, - int access) { + int access, int dmasync) { return ERR_PTR(-EINVAL); } static inline void ib_umem_release(struct ib_umem *umem) { } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2dcbecce3f6..911a661b727 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, dma_unmap_single(dev->dma_device, addr, size, direction); } +static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, + void *cpu_addr, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + return dma_map_single_attrs(dev->dma_device, cpu_addr, size, + direction, attrs); +} + +static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, + u64 addr, size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + return dma_unmap_single_attrs(dev->dma_device, addr, size, + direction, attrs); +} + /** * ib_dma_map_page - Map a physical page to DMA address * @dev: The device for which the dma_addr is to be created @@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, 
dma_unmap_sg(dev->dma_device, sg, nents, direction); } +static inline int ib_dma_map_sg_attrs(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); +} + +static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); +} /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry * @dev: The device for which the DMA addresses were created diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index b8b19e2f57b..f6a9fe0ef09 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -181,7 +181,8 @@ struct scsi_device { sdev_printk(prefix, (scmd)->device, fmt, ##a) enum scsi_target_state { - STARGET_RUNNING = 1, + STARGET_CREATED = 1, + STARGET_RUNNING, STARGET_DEL, }; diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 336c20db87f..ed64862c4e1 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h @@ -22,6 +22,15 @@ #ifndef __ATMEL_LCDC_H__ #define __ATMEL_LCDC_H__ + +/* Way LCD wires are connected to the chip: + * Some Atmel chips use BGR color mode (instead of standard RGB) + * A swapped wiring onboard can bring to RGB mode. + */ +#define ATMEL_LCDC_WIRING_BGR 0 +#define ATMEL_LCDC_WIRING_RGB 1 + + /* LCD Controller info data structure, stored in device platform_data */ struct atmel_lcdfb_info { spinlock_t lock; @@ -39,8 +48,10 @@ struct atmel_lcdfb_info { u8 bl_power; #endif bool lcdcon_is_backlight; + u8 saved_lcdcon; u8 default_bpp; + u8 lcd_wiring_mode; unsigned int default_lcdcon2; unsigned int default_dmacon; void (*atmel_lcdfb_power_control)(int on); diff --git a/include/video/hecubafb.h b/include/video/hecubafb.h new file mode 100644 index 00000000000..7b995233976 --- /dev/null +++ b/include/video/hecubafb.h @@ -0,0 +1,51 @@ +/* + * hecubafb.h - definitions for the hecuba framebuffer driver + * + * Copyright (C) 2008 by Jaya Kumar + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + * + */ + +#ifndef _LINUX_HECUBAFB_H_ +#define _LINUX_HECUBAFB_H_ + +/* Apollo controller specific defines */ +#define APOLLO_START_NEW_IMG 0xA0 +#define APOLLO_STOP_IMG_DATA 0xA1 +#define APOLLO_DISPLAY_IMG 0xA2 +#define APOLLO_ERASE_DISPLAY 0xA3 +#define APOLLO_INIT_DISPLAY 0xA4 + +/* Hecuba interface specific defines */ +#define HCB_WUP_BIT 0x01 +#define HCB_DS_BIT 0x02 +#define HCB_RW_BIT 0x04 +#define HCB_CD_BIT 0x08 +#define HCB_ACK_BIT 0x80 + +/* struct used by hecuba. board specific stuff comes from *board */ +struct hecubafb_par { + struct fb_info *info; + struct hecuba_board *board; + void (*send_command)(struct hecubafb_par *, unsigned char); + void (*send_data)(struct hecubafb_par *, unsigned char); +}; + +/* board specific routines +board drivers can implement wait_for_ack with interrupts if desired. 
if +wait_for_ack is called with clear=0, then go to sleep and return when ack +goes hi or if wait_for_ack with clear=1, then return when ack goes lo */ +struct hecuba_board { + struct module *owner; + void (*remove)(struct hecubafb_par *); + void (*set_ctl)(struct hecubafb_par *, unsigned char, unsigned char); + void (*set_data)(struct hecubafb_par *, unsigned char); + void (*wait_for_ack)(struct hecubafb_par *, int); + int (*init)(struct hecubafb_par *); +}; + + +#endif diff --git a/include/video/metronomefb.h b/include/video/metronomefb.h new file mode 100644 index 00000000000..dab04b4fad7 --- /dev/null +++ b/include/video/metronomefb.h @@ -0,0 +1,62 @@ +/* + * metronomefb.h - definitions for the metronome framebuffer driver + * + * Copyright (C) 2008 by Jaya Kumar + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + * + */ + +#ifndef _LINUX_METRONOMEFB_H_ +#define _LINUX_METRONOMEFB_H_ + +/* address and control descriptors used by metronome controller */ +struct metromem_desc { + u32 mFDADR0; + u32 mFSADR0; + u32 mFIDR0; + u32 mLDCMD0; +}; + +/* command structure used by metronome controller */ +struct metromem_cmd { + u16 opcode; + u16 args[((64-2)/2)]; + u16 csum; +}; + +/* struct used by metronome. board specific stuff comes from *board */ +struct metronomefb_par { + unsigned char *metromem; + struct metromem_desc *metromem_desc; + struct metromem_cmd *metromem_cmd; + unsigned char *metromem_wfm; + unsigned char *metromem_img; + u16 *metromem_img_csum; + u16 *csum_table; + int metromemsize; + dma_addr_t metromem_dma; + dma_addr_t metromem_desc_dma; + struct fb_info *info; + struct metronome_board *board; + wait_queue_head_t waitq; + u8 frame_count; +}; + +/* board specific routines */ +struct metronome_board { + struct module *owner; + void (*free_irq)(struct fb_info *); + void (*init_gpio_regs)(struct metronomefb_par *); + void (*init_lcdc_regs)(struct metronomefb_par *); + void (*post_dma_setup)(struct metronomefb_par *); + void (*set_rst)(struct metronomefb_par *, int); + void (*set_stdby)(struct metronomefb_par *, int); + int (*met_wait_event)(struct metronomefb_par *); + int (*met_wait_event_intr)(struct metronomefb_par *); + int (*setup_irq)(struct fb_info *); +}; + +#endif
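The most widely used addition in this diff is the <linux/unaligned/*.h> helper family: explicitly sized, explicitly endian accessors (get_unaligned_le16/32/64, get_unaligned_be16/32/64 and the matching put_unaligned_* routines) that an architecture composes from the byteshift, memmove, packed-struct or access_ok variants, depending on which unaligned accesses its hardware tolerates. A minimal usage sketch follows; the on-wire layout and the parse_demo_header()/put_demo_status() functions are hypothetical and exist only to illustrate the accessors, which callers typically reach through <asm/unaligned.h>.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/unaligned.h>

/*
 * Hypothetical packed on-wire header, for illustration only:
 *   bytes 0-1  record length, little endian
 *   bytes 2-5  magic value, big endian
 */
static int parse_demo_header(const u8 *buf, size_t len, u16 *rec_len, u32 *magic)
{
	if (len < 6)
		return -EINVAL;

	/* Both reads are safe whatever the alignment of buf happens to be. */
	*rec_len = get_unaligned_le16(buf);
	*magic = get_unaligned_be32(buf + 2);
	return 0;
}

static void put_demo_status(u8 *buf, u32 status)
{
	/* Store a little-endian u32 at a possibly unaligned offset. */
	put_unaligned_le32(status, buf + 2);
}

Splitting the implementations per access style lets each architecture choose, separately for each endianness, the cheapest code its alignment rules allow (direct loads via the packed structs or the access_ok.h casts, byte shifts, or memmove) instead of funnelling every caller through one generic helper.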