Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/bugs.h             2
-rw-r--r--   include/asm-i386/elf.h              2
-rw-r--r--   include/asm-i386/pgtable-2level.h   1
-rw-r--r--   include/asm-i386/pgtable-3level.h  16
-rw-r--r--   include/asm-i386/pgtable.h         80
-rw-r--r--   include/asm-i386/ptrace.h           3
-rw-r--r--   include/asm-i386/smp.h              2
-rw-r--r--   include/asm-i386/spinlock.h         4
-rw-r--r--   include/asm-i386/unistd.h          39
9 files changed, 94 insertions, 55 deletions
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 2a9e4ee5904..592ffeeda45 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -189,6 +189,6 @@ static void __init check_bugs(void)
check_fpu();
check_hlt();
check_popad();
- system_utsname.machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+ init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
alternative_instructions();
}
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index db4344d9f73..3a05436f31c 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -112,7 +112,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
For the moment, we have only optimizations for the Intel generations,
but that could change... */
-#define ELF_PLATFORM (system_utsname.machine)
+#define ELF_PLATFORM (utsname()->machine)
#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 201c86a6711..8d8d3b9ecdb 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -16,6 +16,7 @@
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 0d899173232..c2d701ea35b 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -58,7 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-#define __HAVE_ARCH_SET_PTE_ATOMIC
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set. The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
#define set_pte_atomic(pteptr,pteval) \
set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
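A note on the set_pte_present() change above: on PAE a pte is two 32-bit words, and the present bit lives in the low word. Clearing the low word first, then writing the high word, then the new low word, with write barriers between the stores, guarantees that a concurrent page-table walk never sees a present entry whose halves come from two different values; at worst it sees a not-present entry and takes a fault, which the fault handler already copes with for user PTEs. The snippet below is only an illustrative user-space sketch of that store ordering; the names are made up and __sync_synchronize() merely stands in for smp_wmb():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a PAE pte: two 32-bit halves, bit 0 of the
 * low word playing the role of _PAGE_PRESENT. */
struct fake_pte {
	volatile uint32_t low;
	volatile uint32_t high;
};

/* Mirror of the set_pte_present() ordering: clear low, write high, write low. */
static void fake_set_pte_present(struct fake_pte *p, uint32_t new_low, uint32_t new_high)
{
	p->low = 0;               /* step 1: the entry becomes not-present */
	__sync_synchronize();     /* standing in for smp_wmb() */
	p->high = new_high;       /* step 2: safe, a not-present entry is never used */
	__sync_synchronize();
	p->low = new_low;         /* step 3: the present bit is published last */
}

int main(void)
{
	struct fake_pte pte = { .low = 0x1001u, .high = 0x1u };

	fake_set_pte_present(&pte, 0x2001u, 0x2u);
	printf("low=%#x high=%#x\n", (unsigned)pte.low, (unsigned)pte.high);
	return 0;
}

By contrast, set_pte_atomic() goes through set_64bit(), which updates both halves in one atomic 64-bit operation, so it remains the path for present-to-present updates of kernel PTEs.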
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 541b3e23433..7d398f493dd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
#endif
/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+
+/*
* We only update the dirty/accessed state if we set
* the dirty bit by hand in the kernel, since the hardware
* will do the accessed bit for us, and we don't want to
@@ -258,25 +275,54 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
do { \
if (dirty) { \
(ptep)->pte_low = (entry).pte_low; \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
} while (0)
+/*
+ * We don't actually have these, but we want to advertise them so that
+ * we can encompass the flush here.
+ */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval) \
+do { \
+ set_pte_present((vma)->vm_mm, address, ptep, pteval); \
+ flush_tlb_page(vma, address); \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep) \
+({ \
+ int __dirty; \
+ __dirty = pte_dirty(*(ptep)); \
+ if (__dirty) { \
+ clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __dirty; \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep) \
+({ \
+ int __young; \
+ __young = pte_young(*(ptep)); \
+ if (__young) { \
+ clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __young; \
+})
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
@@ -295,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+ pte_update(mm, addr, ptep);
}
/*
@@ -426,6 +473,13 @@ extern pte_t *lookup_address(unsigned long address);
#define pte_unmap_nested(pte) do { } while (0)
#endif
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, vaddr, ptep); \
+ __flush_tlb_one(vaddr); \
+} while (0)
+
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
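The pte_update()/pte_update_defer() comment above describes a contract rather than code: any direct bit-twiddling of a PTE (as the new ptep_clear_flush_dirty()/ptep_clear_flush_young() macros do with clear_bit()) has to notify a shadow-mode hypervisor, either immediately or at the latest at the TLB flush, and that flush must still happen under the page-table lock. The toy model below (all names hypothetical, not a kernel or hypervisor API) sketches why: the "shadow" copy is only guaranteed to match the "primary" one again once the flush has run inside the locked region.

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define NPTES 4

/* Toy model: "primary" stands in for the page table the kernel writes,
 * "shadow" for the copy a shadow-mode hypervisor maintains. */
static uint32_t primary[NPTES];
static uint32_t shadow[NPTES];
static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static int deferred = -1;               /* index of a pending deferred update */

/* Immediate hook: resynchronize one shadow entry right away. */
static void toy_pte_update(int i)
{
	shadow[i] = primary[i];
}

/* Deferred hook: remember the entry; the TLB flush will resynchronize it. */
static void toy_pte_update_defer(int i)
{
	deferred = i;
}

/* TLB-flush stand-in: also the point where deferred shadow syncs happen.
 * It must run under the same lock as the update, or another CPU could see
 * the shadow stale in between. */
static void toy_flush_tlb(void)
{
	if (deferred >= 0) {
		shadow[deferred] = primary[deferred];
		deferred = -1;
	}
}

int main(void)
{
	primary[1] = shadow[1] = 0x20u | 0x1u;   /* present, "accessed" bit 0x20 set */

	pthread_mutex_lock(&page_table_lock);
	primary[1] &= ~0x20u;           /* clear the accessed bit by hand...      */
	toy_pte_update_defer(1);        /* ...so the hook must be told            */
	toy_flush_tlb();                /* flush while still holding the lock     */
	pthread_mutex_unlock(&page_table_lock);

	(void)toy_pte_update;           /* the non-deferred form, for completeness */
	printf("primary=%#x shadow=%#x\n", (unsigned)primary[1], (unsigned)shadow[1]);
	return 0;
}

In the macros above, flush_tlb_page() plays the role of toy_flush_tlb(): it is issued before the caller drops the page-table lock, so a deferred shadow update can piggyback on it without going out of sync on SMP.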
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index a4a0e5207db..d505f501077 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -47,7 +47,10 @@ static inline int user_mode_vm(struct pt_regs *regs)
{
return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
}
+
#define instruction_pointer(regs) ((regs)->eip)
+#define regs_return_value(regs) ((regs)->eax)
+
extern unsigned long profile_pc(struct pt_regs *regs);
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 915c26a31b7..6aa1206f6e2 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -84,6 +84,7 @@ static inline int hard_smp_processor_id(void)
#endif
#endif
+extern int safe_smp_processor_id(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern unsigned int num_processors;
@@ -92,6 +93,7 @@ extern unsigned int num_processors;
#else /* CONFIG_SMP */
+#define safe_smp_processor_id() 0
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define NO_PROC_ID 0xFF /* No processor magic marker */
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index b0b3043f05e..c18b71fae6b 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -205,4 +205,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
: "+m" (rw->lock) : : "memory");
}
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
#endif /* __ASM_SPINLOCK_H */
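The new _raw_spin_relax()/_raw_read_relax()/_raw_write_relax() hooks give generic lock-spinning code a place to call cpu_relax(), which on i386 is a "rep; nop" (PAUSE) hint telling the CPU it is busy-waiting. The sketch below shows the try/relax pattern such hooks slot into; it is a hypothetical user-space lock under those assumptions, not the kernel's __raw_spin_lock():

#include <stdatomic.h>
#include <stdio.h>

struct toy_spinlock {
	atomic_flag locked;
};

static inline void toy_cpu_relax(void)
{
	__asm__ __volatile__("rep; nop" ::: "memory");  /* PAUSE hint on x86 */
}

static void toy_spin_lock(struct toy_spinlock *l)
{
	/* Spin until the flag is acquired; the relax hook runs between attempts. */
	while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
		toy_cpu_relax();
}

static void toy_spin_unlock(struct toy_spinlock *l)
{
	atomic_flag_clear_explicit(&l->locked, memory_order_release);
}

int main(void)
{
	struct toy_spinlock lock = { ATOMIC_FLAG_INIT };

	toy_spin_lock(&lock);
	puts("locked");
	toy_spin_unlock(&lock);
	return 0;
}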
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index bd9987087ad..3ca7ab963d7 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -451,45 +451,6 @@ __syscall_return(type,__res); \
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#ifdef __KERNEL_SYSCALLS__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-
-/*
- * we need this inline - forking from kernel space will result
- * in NO COPY ON WRITE (!!!), until an execve is executed. This
- * is no problem, but for the stack. This is handled by not letting
- * main() use the stack at all after fork(). Thus, no function
- * calls - which means inline code for fork too, as otherwise we
- * would use the stack upon exit from 'fork()'.
- *
- * Actually only pause and fork are needed inline, so that there
- * won't be any messing with the stack from main(), but we define
- * some others too.
- */
-static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-
-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount);
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage int sys_execve(struct pt_regs regs);
-asmlinkage int sys_clone(struct pt_regs regs);
-asmlinkage int sys_fork(struct pt_regs regs);
-asmlinkage int sys_vfork(struct pt_regs regs);
-asmlinkage int sys_pipe(unsigned long __user *fildes);
-asmlinkage long sys_iopl(unsigned long unused);
-struct sigaction;
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
-
-#endif /* __KERNEL_SYSCALLS__ */
-
/*
* "Conditional" syscalls
*