Diffstat (limited to 'include/asm-powerpc')
-rw-r--r-- | include/asm-powerpc/hugetlb.h | 10
-rw-r--r-- | include/asm-powerpc/io.h | 5
-rw-r--r-- | include/asm-powerpc/kgdb.h | 92
-rw-r--r-- | include/asm-powerpc/mmu-hash64.h | 6
-rw-r--r-- | include/asm-powerpc/page.h | 3
-rw-r--r-- | include/asm-powerpc/page_64.h | 1
-rw-r--r-- | include/asm-powerpc/pgalloc-64.h | 4
-rw-r--r-- | include/asm-powerpc/pgtable-4k.h | 3
-rw-r--r-- | include/asm-powerpc/pgtable-64k.h | 2
-rw-r--r-- | include/asm-powerpc/pgtable-ppc32.h | 16
-rw-r--r-- | include/asm-powerpc/pgtable-ppc64.h | 8
11 files changed, 95 insertions, 55 deletions
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index be32ff02f4a..26f0d0ab27a 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -7,7 +7,7 @@
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
@@ -21,11 +21,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
 {
-	if (len & ~HPAGE_MASK)
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
+	if (addr & ~huge_page_mask(h))
 		return -EINVAL;
 	return 0;
 }
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 8b627823f5f..77c7fa025e6 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
  * and can be hooked by the platform via ppc_md
  *
  * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md
+ * also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ * same thing as ioremap_flags.
  *
  * * ioremap_nocache is identical to ioremap
  *
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
 extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
 				   unsigned long flags);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot)	ioremap_flags((addr), (size), (prot))
+
 extern void iounmap(volatile void __iomem *addr);
 
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
diff --git a/include/asm-powerpc/kgdb.h b/include/asm-powerpc/kgdb.h
index b617dac8296..1399caf719a 100644
--- a/include/asm-powerpc/kgdb.h
+++ b/include/asm-powerpc/kgdb.h
@@ -1,57 +1,65 @@
 /*
- * kgdb.h: Defines and declarations for serial line source level
- * remote debugging of the Linux kernel using gdb.
+ * include/asm-powerpc/kgdb.h
  *
+ * The PowerPC (32/64) specific defines / externs for KGDB. Based on
+ * the previous 32bit and 64bit specific files, which had the following
+ * copyrights:
+ *
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC Mods (C) 2004 Tom Rini (trini@mvista.com)
+ * PPC Mods (C) 2003 John Whitney (john.whitney@timesys.com)
  * PPC Mods (C) 1998 Michael Tesch (tesch@cs.wisc.edu)
  *
+ *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Author: Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
  */
 #ifdef __KERNEL__
-#ifndef _PPC_KGDB_H
-#define _PPC_KGDB_H
+#ifndef __POWERPC_KGDB_H__
+#define __POWERPC_KGDB_H__
 
 #ifndef __ASSEMBLY__
 
-/* Things specific to the gen550 backend. */
-struct uart_port;
-
-extern void gen550_progress(char *, unsigned short);
-extern void gen550_kgdb_map_scc(void);
-extern void gen550_init(int, struct uart_port *);
-
-/* Things specific to the pmac backend. */
-extern void zs_kgdb_hook(int tty_num);
-
-/* To init the kgdb engine. (called by serial hook)*/
-extern void set_debug_traps(void);
-
-/* To enter the debugger explicitly. */
-extern void breakpoint(void);
-
-/* For taking exceptions
- * these are defined in traps.c
- */
-extern int (*debugger)(struct pt_regs *regs);
-extern int (*debugger_bpt)(struct pt_regs *regs);
-extern int (*debugger_sstep)(struct pt_regs *regs);
-extern int (*debugger_iabr_match)(struct pt_regs *regs);
-extern int (*debugger_dabr_match)(struct pt_regs *regs);
-extern void (*debugger_fault_handler)(struct pt_regs *regs);
-
-/* What we bring to the party */
-int kgdb_bpt(struct pt_regs *regs);
-int kgdb_sstep(struct pt_regs *regs);
-void kgdb(struct pt_regs *regs);
-int kgdb_iabr_match(struct pt_regs *regs);
-int kgdb_dabr_match(struct pt_regs *regs);
+#define BREAK_INSTR_SIZE	4
+#define BUFMAX			((NUMREGBYTES * 2) + 512)
+#define OUTBUFMAX		((NUMREGBYTES * 2) + 512)
+static inline void arch_kgdb_breakpoint(void)
+{
+	asm(".long 0x7d821008"); /* twge r2, r2 */
+}
+#define CACHE_FLUSH_IS_SAFE	1
 
+/* The number bytes of registers we have to save depends on a few
+ * things. For 64bit we default to not including vector registers and
+ * vector state registers. */
+#ifdef CONFIG_PPC64
 /*
- * external low-level support routines (ie macserial.c)
+ * 64 bit (8 byte) registers:
+ *   32 gpr, 32 fpr, nip, msr, link, ctr
+ * 32 bit (4 byte) registers:
+ *   ccr, xer, fpscr
  */
-extern void kgdb_interruptible(int); /* control interrupts from serial */
-extern void putDebugChar(char); /* write a single character */
-extern char getDebugChar(void); /* read and return a single char */
-
+#define NUMREGBYTES		((68 * 8) + (3 * 4))
+#define NUMCRITREGBYTES		184
+#else /* CONFIG_PPC32 */
+/* On non-E500 family PPC32 we determine the size by picking the last
+ * register we need, but on E500 we skip sections so we list what we
+ * need to store, and add it up. */
+#ifndef CONFIG_E500
+#define MAXREG (PT_FPSCR+1)
+#else
+/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
+#define MAXREG ((32*2)+6+2+1)
+#endif
+#define NUMREGBYTES (MAXREG * sizeof(int))
+/* CR/LR, R1, R2, R13-R31 inclusive. */
+#define NUMCRITREGBYTES (23 * sizeof(int))
+#endif /* 32/64 */
 #endif /* !(__ASSEMBLY__) */
-#endif /* !(_PPC_KGDB_H) */
+#endif /* !__POWERPC_KGDB_H__ */
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index d1dc16afb11..19c7a940349 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -194,9 +194,9 @@ extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
- * The page size index of the huge pages for use by hugetlbfs
+ * The page size indexes of the huge pages for use by hugetlbfs
  */
-extern int mmu_huge_psize;
+extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
 
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -281,6 +281,8 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			     unsigned long pstart, unsigned long mode,
 			     int psize, int ssize);
 extern void set_huge_psize(int psize);
+extern void add_gpage(unsigned long addr, unsigned long page_size,
+		      unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
 extern void htab_initialize(void);
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index cffdf0eb0df..e088545cb3f 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -119,9 +119,6 @@ extern phys_addr_t kernstart_addr;
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
-
 /*
  * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
  * "kernelness", use is_kernel_addr() - it should do what you want.
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 02fd80710e9..043bfdfe4f7 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -90,6 +90,7 @@ extern unsigned int HPAGE_SHIFT;
 
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE		3
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 68980990f62..812a1d8f35c 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -22,7 +22,7 @@ extern struct kmem_cache *pgtable_cache[];
 #define PUD_CACHE_NUM		1
 #define PMD_CACHE_NUM		1
 #define HUGEPTE_CACHE_NUM	2
-#define PTE_NONCACHE_NUM	3  /* from GFP rather than kmem_cache */
+#define PTE_NONCACHE_NUM	7  /* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -119,7 +119,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-#define PGF_CACHENUM_MASK	0x3
+#define PGF_CACHENUM_MASK	0x7
 
 typedef struct pgtable_free {
 	unsigned long val;
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index fd2090dc1dc..c9601dfb4a1 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
 			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
 
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN	0
+
 /* PAGE_MASK gives the right answer below, but only by accident */
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index c5007712473..7e54adb3559 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -138,7 +138,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
 	unsigned __split = (psize == MMU_PAGE_4K ||			\
 			    psize == MMU_PAGE_64K_AP);			\
 	shift = mmu_psize_defs[psize].shift;				\
-	for (index = 0; va < __end; index++, va += (1 << shift)) {	\
+	for (index = 0; va < __end; index++, va += (1L << shift)) {	\
 		if (!__split || __rpte_sub_valid(rpte, index)) do {	\
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb7..bdbab72f3eb 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+				 _PAGE_USER | _PAGE_ACCESSED | \
+				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+				 _PAGE_EXEC | _PAGE_HWEXEC)
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ab98a9c80b2..ba8000352b9 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+				 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+				 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
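
For context, a minimal sketch (illustrative only, not part of the patch) of what the reworked prepare_hugepage_range() check amounts to: with multiple hugepage sizes, the alignment mask comes from the hstate attached to the hugetlbfs file, via the generic hstate_file()/huge_page_mask() helpers the new inline relies on, instead of the single global HPAGE_MASK. The helper name below is hypothetical.

/* Illustrative sketch only -- mirrors the new prepare_hugepage_range()
 * above; hstate_file() and huge_page_mask() are the generic hugetlb
 * helpers used by the patched inline. */
#include <linux/fs.h>
#include <linux/hugetlb.h>

static int hugepage_region_is_aligned(struct file *file,
				      unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);	/* hugepage size of this file's mount */
	unsigned long mask = huge_page_mask(h);	/* e.g. ~(16M - 1) or ~(64K - 1) */

	/* Both the start address and the length must be aligned to this
	 * hstate's hugepage size, not to one global HPAGE_MASK. */
	if ((addr & ~mask) || (len & ~mask))
		return -EINVAL;
	return 0;
}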