Diffstat (limited to 'include/asm-um')
-rw-r--r--  include/asm-um/a.out.h               |   4
-rw-r--r--  include/asm-um/current.h             |  23
-rw-r--r--  include/asm-um/elf-i386.h            |  28
-rw-r--r--  include/asm-um/elf-x86_64.h          |   8
-rw-r--r--  include/asm-um/fixmap.h              |   6
-rw-r--r--  include/asm-um/ldt.h                 |   4
-rw-r--r--  include/asm-um/linkage.h             |   6
-rw-r--r--  include/asm-um/mmu_context.h         |   7
-rw-r--r--  include/asm-um/page.h                |   6
-rw-r--r--  include/asm-um/param.h               |   2
-rw-r--r--  include/asm-um/pgalloc.h             |   8
-rw-r--r--  include/asm-um/pgtable-2level.h      |   3
-rw-r--r--  include/asm-um/pgtable-3level.h      |  35
-rw-r--r--  include/asm-um/pgtable.h             | 100
-rw-r--r--  include/asm-um/processor-generic.h   |  13
-rw-r--r--  include/asm-um/processor-i386.h      |   1
-rw-r--r--  include/asm-um/thread_info.h         |  11
-rw-r--r--  include/asm-um/tlb.h                 | 122
-rw-r--r--  include/asm-um/uaccess.h             |  10
19 files changed, 206 insertions, 191 deletions
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
index 9281dd8eb33..f42ff14577f 100644
--- a/include/asm-um/a.out.h
+++ b/include/asm-um/a.out.h
@@ -13,11 +13,9 @@
extern unsigned long stacksizelim;
-extern unsigned long host_task_size;
-
#define STACK_ROOM (stacksizelim)
-#define STACK_TOP task_size
+#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
#define STACK_TOP_MAX STACK_TOP
diff --git a/include/asm-um/current.h b/include/asm-um/current.h
index 8fd72f69ce6..c2191d9aa03 100644
--- a/include/asm-um/current.h
+++ b/include/asm-um/current.h
@@ -1,32 +1,13 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_CURRENT_H
#define __UM_CURRENT_H
-#ifndef __ASSEMBLY__
-
-#include "asm/page.h"
#include "linux/thread_info.h"
#define current (current_thread_info()->task)
-/*Backward compatibility - it's used inside arch/um.*/
-#define current_thread current_thread_info()
-
-#endif /* __ASSEMBLY__ */
-
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-i386.h b/include/asm-um/elf-i386.h
index ca94a136dfe..23d6893e861 100644
--- a/include/asm-um/elf-i386.h
+++ b/include/asm-um/elf-i386.h
@@ -1,11 +1,11 @@
/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_ELF_I386_H
#define __UM_ELF_I386_H
-#include <linux/sched.h>
+#include <asm/user.h>
#include "skas.h"
#define R_386_NONE 0
@@ -46,7 +46,7 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EDI(regs) = 0; \
PT_REGS_EBP(regs) = 0; \
PT_REGS_EAX(regs) = 0; \
-} while(0)
+} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
@@ -74,14 +74,9 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[14] = PT_REGS_EFLAGS(regs); \
pr_reg[15] = PT_REGS_SP(regs); \
pr_reg[16] = PT_REGS_SS(regs); \
-} while(0);
+} while (0);
-static inline int elf_core_copy_fpregs(struct task_struct *t,
- elf_fpregset_t *fpu)
-{
- int cpu = ((struct thread_info *) t->stack)->cpu;
- return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
@@ -91,7 +86,7 @@ extern long elf_aux_hwcap;
extern char * elf_aux_platform;
#define ELF_PLATFORM (elf_aux_platform)
-#define SET_PERSONALITY(ex, ibcs2) do ; while(0)
+#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
extern unsigned long vsyscall_ehdr;
extern unsigned long vsyscall_end;
@@ -166,14 +161,3 @@ if ( vsyscall_ehdr ) { \
}
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
index 3c9d543eb61..3b2d5224a7e 100644
--- a/include/asm-um/elf-x86_64.h
+++ b/include/asm-um/elf-x86_64.h
@@ -7,7 +7,6 @@
#ifndef __UM_ELF_X86_64_H
#define __UM_ELF_X86_64_H
-#include <linux/sched.h>
#include <asm/user.h>
#include "skas.h"
@@ -96,12 +95,7 @@ typedef struct user_i387_struct elf_fpregset_t;
(pr_reg)[25] = 0; \
(pr_reg)[26] = 0;
-static inline int elf_core_copy_fpregs(struct task_struct *t,
- elf_fpregset_t *fpu)
-{
- int cpu = current_thread->cpu;
- return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/include/asm-um/fixmap.h b/include/asm-um/fixmap.h
index d352a35cfaf..89a87c18b92 100644
--- a/include/asm-um/fixmap.h
+++ b/include/asm-um/fixmap.h
@@ -1,9 +1,10 @@
#ifndef __UM_FIXMAP_H
#define __UM_FIXMAP_H
+#include <asm/system.h>
#include <asm/kmap_types.h>
#include <asm/archparam.h>
-#include <asm/elf.h>
+#include <asm/page.h>
/*
* Here we define all the compile-time 'special' virtual
@@ -55,9 +56,8 @@ extern void __set_fixmap (enum fixed_addresses idx,
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
-extern unsigned long get_kmem_end(void);
-#define FIXADDR_TOP (get_kmem_end() - 0x2000)
+#define FIXADDR_TOP (CONFIG_TOP_ADDR - 2 * PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
index b2553f3e87e..52af512f5e7 100644
--- a/include/asm-um/ldt.h
+++ b/include/asm-um/ldt.h
@@ -8,7 +8,7 @@
#ifndef __ASM_LDT_H
#define __ASM_LDT_H
-#include "asm/semaphore.h"
+#include <linux/mutex.h>
#include "asm/host_ldt.h"
extern void ldt_host_info(void);
@@ -27,7 +27,7 @@ struct ldt_entry {
typedef struct uml_ldt {
int entry_count;
- struct semaphore semaphore;
+ struct mutex lock;
union {
struct ldt_entry * pages[LDT_PAGES_MAX];
struct ldt_entry entries[LDT_DIRECT_ENTRIES];
diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h
index cdb3024a699..7dfce37adc8 100644
--- a/include/asm-um/linkage.h
+++ b/include/asm-um/linkage.h
@@ -3,10 +3,4 @@
#include "asm/arch/linkage.h"
-
-/* <linux/linkage.h> will pick sane defaults */
-#ifdef CONFIG_GPROF
-#undef fastcall
-#endif
-
#endif
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 5f3b863aef9..6686fc524ca 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,11 +6,12 @@
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H
-#include <asm-generic/mm_hooks.h>
-
#include "linux/sched.h"
#include "um_mmu.h"
+extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
#define get_mmu_context(task) do ; while(0)
#define activate_context(tsk) do ; while(0)
@@ -30,6 +31,8 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
*/
if (old != new && (current->flags & PF_BORROWED_MM))
__switch_mm(&new->context.id);
+
+ arch_dup_mmap(old, new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 4b424c75fca..fe2374d705d 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -30,7 +30,7 @@ struct page;
#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
@@ -106,8 +106,8 @@ extern unsigned long uml_physmem;
#define __pa(virt) to_phys((void *) (unsigned long) (virt))
#define __va(phys) to_virt((unsigned long) (phys))
-#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
-#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
+#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
diff --git a/include/asm-um/param.h b/include/asm-um/param.h
index f914e7d67b0..4cd4a226f8c 100644
--- a/include/asm-um/param.h
+++ b/include/asm-um/param.h
@@ -10,7 +10,7 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
-#define HZ 100
+#define HZ CONFIG_HZ
#define USER_HZ 100 /* .. some user interfaces are in "ticks" */
#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif
diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h
index 14904876e8f..4f3e62b0286 100644
--- a/include/asm-um/pgalloc.h
+++ b/include/asm-um/pgalloc.h
@@ -23,17 +23,17 @@
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
@@ -42,7 +42,7 @@ static inline void pte_free(struct page *pte)
#ifdef CONFIG_3_LEVEL_PGTABLES
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
index 172a75fde51..f534b73e753 100644
--- a/include/asm-um/pgtable-2level.h
+++ b/include/asm-um/pgtable-2level.h
@@ -41,9 +41,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
/*
* Bits 0 through 4 are taken
*/
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
index 3ebafbaacb2..0446f456b42 100644
--- a/include/asm-um/pgtable-3level.h
+++ b/include/asm-um/pgtable-3level.h
@@ -11,7 +11,11 @@
/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#ifdef CONFIG_64BIT
#define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -28,9 +32,15 @@
*/
#define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) \
@@ -49,7 +59,12 @@
#define pud_populate(mm, pud, pmd) \
set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
static inline int pgd_newpage(pgd_t pgd)
{
return(pgd_val(pgd) & _PAGE_NEWPAGE);
@@ -57,17 +72,14 @@ static inline int pgd_newpage(pgd_t pgd)
static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-
- if(pmd)
- memset(pmd, 0, PAGE_SIZE);
-
- return pmd;
-}
+struct mm_struct;
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
static inline void pud_clear (pud_t *pud)
{
@@ -75,8 +87,7 @@ static inline void pud_clear (pud_t *pud)
}
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) \
- ((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 830fc6e5d49..4102b443e92 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
* Derived from include/asm-i386/pgtable.h
* Licensed under the GPL
@@ -8,11 +8,7 @@
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
-#include "linux/sched.h"
-#include "linux/linkage.h"
-#include "asm/processor.h"
-#include "asm/page.h"
-#include "asm/fixmap.h"
+#include <asm/fixmap.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
@@ -34,22 +30,11 @@
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
- pte_t *pte_out);
-
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
#define pgtable_cache_init() do ; while (0)
-/*
- * pgd entries used up by user/kernel:
- */
-
-#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
@@ -62,16 +47,12 @@ extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-#define REGION_SHIFT (sizeof(pte_t) * 8 - 4)
-#define REGION_MASK (((unsigned long) 0xf) << REGION_SHIFT)
-
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -81,11 +62,12 @@ extern unsigned long end_iomem;
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
- * The i386 can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+ * Also, write permissions imply read permissions. This is the closest we can
+ * get..
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
@@ -106,40 +88,16 @@ extern unsigned long end_iomem;
#define __S111 PAGE_SHARED
/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* page table for 0-4MB for everybody */
-extern unsigned long pg0[1024];
-
-/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
@@ -149,14 +107,9 @@ extern unsigned long pg0[1024];
#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
-#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
-#define phys_addr(p) ((p) & ~REGION_MASK)
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
@@ -309,7 +262,8 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
#define mk_pte(page, pgprot) \
({ pte_t pte; \
@@ -325,8 +279,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
/*
* the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
@@ -335,8 +287,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
-
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
@@ -355,8 +305,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
*
@@ -372,6 +326,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
#define update_mmu_cache(vma,address,pte) do ; while (0)
/* Encode and de-code a swap entry */
@@ -388,29 +345,4 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#include <asm-generic/pgtable.h>
-#include <asm-generic/pgtable-nopud.h>
-
-#ifdef CONFIG_HIGHMEM
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, vaddr, ptep); \
- __flush_tlb_one(vaddr); \
-} while (0)
#endif
-
-#endif
-#endif
-
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
index 78c0599cc80..b7d9a16a745 100644
--- a/include/asm-um/processor-generic.h
+++ b/include/asm-um/processor-generic.h
@@ -11,6 +11,7 @@ struct pt_regs;
struct task_struct;
#include "asm/ptrace.h"
+#include "asm/pgtable.h"
#include "registers.h"
#include "sysdep/archsetjmp.h"
@@ -26,7 +27,6 @@ struct thread_struct {
* as of 2.6.11).
*/
int forking;
- int nsyscalls;
struct pt_regs regs;
int singlestep_syscall;
void *fault_addr;
@@ -58,7 +58,6 @@ struct thread_struct {
#define INIT_THREAD \
{ \
.forking = 0, \
- .nsyscalls = 0, \
.regs = EMPTY_REGS, \
.fault_addr = NULL, \
.prev_sched = NULL, \
@@ -68,10 +67,6 @@ struct thread_struct {
.request = { 0 } \
}
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
extern struct task_struct *alloc_task_struct(void);
static inline void release_thread(struct task_struct *task)
@@ -97,9 +92,7 @@ static inline void mm_copy_segments(struct mm_struct *from_mm,
/*
* User space process size: 3GB (default).
*/
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
+#define TASK_SIZE (CONFIG_TOP_ADDR & PGDIR_MASK)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
@@ -128,6 +121,6 @@ extern struct cpuinfo_um cpu_data[];
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-#define get_wchan(p) (0)
+extern unsigned long get_wchan(struct task_struct *p);
#endif
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 595f1c3e1e4..a2b7fe13fe1 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -10,7 +10,6 @@
#include "asm/host_ldt.h"
#include "asm/segment.h"
-extern int host_has_xmm;
extern int host_has_cmov;
/* include faultinfo structure */
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 6e5fd5c892d..356b83e2c22 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -8,8 +8,9 @@
#ifndef __ASSEMBLY__
-#include <asm/processor.h>
#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -75,8 +76,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
- * TIF_NEED_RESCHED
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ * TIF_NEED_RESCHED
*/
#define TIF_RESTART_BLOCK 4
#define TIF_MEMDIE 5
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
index c640033bc1f..39fc475df6c 100644
--- a/include/asm-um/tlb.h
+++ b/include/asm-um/tlb.h
@@ -1,6 +1,126 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
-#include <asm/arch/tlb.h>
+#include <linux/swap.h>
+#include <asm/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int need_flush; /* Really unmapped some ptes? */
+ unsigned long start;
+ unsigned long end;
+ unsigned int fullmm; /* non-zero means full mm flush */
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + PAGE_SIZE)
+ tlb->end = address + PAGE_SIZE;
+}
+
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+ tlb->need_flush = 0;
+
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+
+ if (tlb->fullmm) {
+ tlb->start = 0;
+ tlb->end = TASK_SIZE;
+ }
+}
+
+/* tlb_gather_mmu
+ * Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+ tlb->fullmm = full_mm_flush;
+
+ init_tlb_gather(tlb);
+
+ return tlb;
+}
+
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (!tlb->need_flush)
+ return;
+
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+ init_tlb_gather(tlb);
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+/* tlb_remove_page
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
+ * while handling the additional races in SMP caused by other CPUs
+ * caching valid mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ free_page_and_swap_cache(page);
+ return;
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that pte's were really umapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+
+#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+
+#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+
+#define tlb_migrate_finish(mm) do {} while (0)
#endif
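
The new asm-um/tlb.h above implements the generic mmu_gather ("TLB shootdown") interface directly instead of including the host arch version. As a rough, illustrative sketch only -- example_unmap() is a made-up caller; in the real kernel this sequence lives in the core mm code (unmap_vmas()/zap_pte_range() and friends) -- the interface defined in the hunk above is driven like this:

/* Illustrative sketch, not part of the patch: how core mm code is
 * expected to use the asm-um mmu_gather interface defined above. */
#include <linux/mm.h>
#include <asm/tlb.h>

static void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	/* 0 = not a full-mm teardown, so only the touched range is flushed */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	tlb_start_vma(tlb, vma);		/* no-op on UML */

	/* For each pte it clears, the mm code would do:
	 *	tlb_remove_tlb_entry(tlb, ptep, address);  -- sets need_flush,
	 *	                                              widens tlb->start/end
	 *	tlb_remove_page(tlb, page);                -- frees the page
	 */

	tlb_end_vma(tlb, vma);			/* no-op on UML */
	tlb_finish_mmu(tlb, start, end);	/* flush_tlb_mm_range() on the
						 * accumulated range, then
						 * put_cpu_var(mmu_gathers) */
}
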
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 077032d4fc4..b9a895d6fa1 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -6,7 +6,15 @@
#ifndef __UM_UACCESS_H
#define __UM_UACCESS_H
-#include "linux/sched.h"
+#include <asm/errno.h>
+#include <asm/processor.h>
+
+/* thread_info has a mm_segment_t in it, so put the definition up here */
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#include "linux/thread_info.h"
#define VERIFY_READ 0
#define VERIFY_WRITE 1