From b8b572e1015f81b4e748417be2629dfe51ab99f9 Mon Sep 17 00:00:00 2001
From: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Fri, 1 Aug 2008 15:20:30 +1000
Subject: powerpc: Move include files to arch/powerpc/include/asm from
 include/asm-powerpc.

This is the result of a

mkdir arch/powerpc/include/asm
git mv include/asm-powerpc/* arch/powerpc/include/asm

Followed by a few documentation/comment fixups and a couple of places
where the old include path was being used explicitly.  Of the latter only
one was outside the arch code and it is a driver only built for powerpc.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/pgtable.h | 57 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 arch/powerpc/include/asm/pgtable.h

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
new file mode 100644
index 00000000000..dbb8ca172e4
--- /dev/null
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_POWERPC_PGTABLE_H
+#define _ASM_POWERPC_PGTABLE_H
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>		/* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+struct mm_struct;
+#endif /* !__ASSEMBLY__ */
+
+#if defined(CONFIG_PPC64)
+# include <asm/pgtable-ppc64.h>
+#else
+# include <asm/pgtable-ppc32.h>
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address.  Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test.  What should we do here?
+ */
+#define kern_addr_valid(addr)	(1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PGTABLE_H */
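
For illustration only (not part of the commit): a minimal sketch of how a
driver's mmap handler typically uses the io_remap_pfn_range() wrapper defined
in this header, which on powerpc simply forwards to remap_pfn_range().  The
function name example_mmap and the page frame number are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = 0x80000;	/* hypothetical MMIO page frame number */

	/* On powerpc this expands to remap_pfn_range(vma, vaddr, pfn, size, prot) */
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}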