Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/dma-mapping.h            | 12
-rw-r--r--  include/asm-i386/mach-default/do_timer.h  |  2
-rw-r--r--  include/asm-i386/mach-summit/mach_apic.h  |  2
-rw-r--r--  include/asm-i386/mach-visws/do_timer.h    |  2
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h  |  2
-rw-r--r--  include/asm-i386/nmi.h                    |  6
-rw-r--r--  include/asm-i386/pgtable-2level.h         |  1
-rw-r--r--  include/asm-i386/pgtable-3level.h         | 16
-rw-r--r--  include/asm-i386/pgtable.h                | 80
-rw-r--r--  include/asm-i386/smp.h                    |  4
-rw-r--r--  include/asm-i386/spinlock.h               |  4
11 files changed, 107 insertions(+), 24 deletions(-)
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 576ae01d71c..81999a3ebe7 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -21,7 +21,7 @@ static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
WARN_ON(size == 0);
flush_write_buffers();
return virt_to_phys(ptr);
@@ -31,7 +31,7 @@ static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
}
static inline int
@@ -40,7 +40,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
{
int i;
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
WARN_ON(nents == 0 || sg[0].length == 0);
for (i = 0; i < nents; i++ ) {
@@ -57,7 +57,7 @@ static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
return page_to_phys(page) + offset;
}
@@ -65,7 +65,7 @@ static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
}
@@ -73,7 +73,7 @@ static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!valid_dma_direction(direction));
}
static inline void
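
Note: the old BUG_ON only rejected DMA_NONE, so any other out-of-range
direction value slipped through; the new check whitelists the three legal
directions instead. The helper lives in include/linux/dma-mapping.h and,
from the same series, looks essentially like this (quoted from memory, so
treat as a sketch):

    static inline int valid_dma_direction(int dma_direction)
    {
            return ((dma_direction == DMA_BIDIRECTIONAL) ||
                    (dma_direction == DMA_TO_DEVICE)     ||
                    (dma_direction == DMA_FROM_DEVICE));
    }
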
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 6312c3e7981..4182c347ef8 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -16,7 +16,7 @@
static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
- do_timer(regs);
+ do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode_vm(regs));
#endif
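
Note: do_timer() no longer takes the trapped register state; its argument
is now the number of ticks that elapsed, hence the literal 1 passed from a
per-tick interrupt hook. The new definition in kernel/timer.c is roughly
(from memory, a sketch):

    void do_timer(unsigned long ticks)
    {
            jiffies_64 += ticks;    /* advance the global tick counter */
            update_times(ticks);    /* recompute wall time and load average */
    }

Code that still needs the register state fetches it with get_irq_regs()
rather than having a pt_regs pointer threaded through every timer path.
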
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index a81b0596159..254a0fe01c6 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -88,7 +88,7 @@ static inline void clustered_apic_check(void)
static inline int apicid_to_node(int logical_apicid)
{
- return logical_apicid >> 5; /* 2 clusterids per CEC */
+ return apicid_2_node[logical_apicid];
}
/* Mapping from cpu number to logical apicid */
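
Note: the old `logical_apicid >> 5` hard-wired the assumption of exactly
two cluster IDs per CEC; the lookup table lets boot code record whatever
topology it actually discovers. A hypothetical population loop (the helper
name below is illustrative, not from this patch):

    /* Record each logical APIC ID's home node once the Summit topology
     * is known; apicid_to_node() then degenerates to a table read.
     * summit_node_of_cpu() is an assumed helper, not a real kernel API. */
    static void __init build_apicid_2_node(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    apicid_2_node[cpu_2_logical_apicid[cpu]] =
                            summit_node_of_cpu(cpu);
    }
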
diff --git a/include/asm-i386/mach-visws/do_timer.h b/include/asm-i386/mach-visws/do_timer.h
index 95568e6ca91..8db618c5a72 100644
--- a/include/asm-i386/mach-visws/do_timer.h
+++ b/include/asm-i386/mach-visws/do_timer.h
@@ -9,7 +9,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
/* Clear the interrupt */
co_cpu_write(CO_CPU_STAT,co_cpu_read(CO_CPU_STAT) & ~CO_STAT_TIMEINTR);
- do_timer(regs);
+ do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode_vm(regs));
#endif
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index eaf51809898..099fe9f5c1b 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -3,7 +3,7 @@
static inline void do_timer_interrupt_hook(struct pt_regs *regs)
{
- do_timer(regs);
+ do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode_vm(regs));
#endif
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 303bcd4592b..269d315719c 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -36,4 +36,10 @@ extern unsigned int nmi_watchdog;
#define NMI_LOCAL_APIC 2
#define NMI_INVALID 3
+struct ctl_table;
+struct file;
+extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
+ void __user *, size_t *, loff_t *);
+extern int unknown_nmi_panic;
+
#endif /* ASM_NMI_H */
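
Note: proc_nmi_enabled() is the handler behind /proc/sys/kernel/nmi_watchdog,
and unknown_nmi_panic backs /proc/sys/kernel/unknown_nmi_panic; exporting
both here is what lets kernel/sysctl.c reference them. The wiring looks
roughly like this (field values quoted from memory -- a sketch, not the
exact commit):

    /* Excerpt from kern_table[] in kernel/sysctl.c of that era: */
    {
            .ctl_name       = KERN_NMI_WATCHDOG,
            .procname       = "nmi_watchdog",
            .data           = &nmi_watchdog_enabled,
            .maxlen         = sizeof(int),
            .mode           = 0644,
            .proc_handler   = &proc_nmi_enabled,
    },
    {
            .ctl_name       = KERN_UNKNOWN_NMI_PANIC,
            .procname       = "unknown_nmi_panic",
            .data           = &unknown_nmi_panic,
            .maxlen         = sizeof(int),
            .mode           = 0644,
            .proc_handler   = &proc_dointvec,
    },
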
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 201c86a6711..8d8d3b9ecdb 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -16,6 +16,7 @@
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index 0d899173232..c2d701ea35b 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -58,7 +58,21 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-#define __HAVE_ARCH_SET_PTE_ATOMIC
+/*
+ * Since this is only called on user PTEs, and the page fault handler
+ * must handle the already racy situation of simultaneous page faults,
+ * we are justified in merely clearing the PTE present bit, followed
+ * by a set. The ordering here is important.
+ */
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
#define set_pte_atomic(pteptr,pteval) \
set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
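
Note: with 3-level (PAE) page tables a PTE is two 32-bit words, and the
hardware walker reads them non-atomically. Because the present bit lives
in pte_low, the clear-low / write-high / write-low sequence above makes
every intermediate state look not-present:

    /*
     * States a concurrent walker can observe during set_pte_present()
     * (illustrative):
     *
     *   step             pte_low   pte_high   walker sees
     *   initial          old       old        old mapping (valid)
     *   pte_low = 0      0         old        not present -> fault, retry
     *   pte_high = new   0         new        not present -> fault, retry
     *   pte_low = new    new       new        new mapping (valid)
     *
     * No state pairs a new half with an old half while appearing present;
     * the smp_wmb() calls keep the stores from being reordered, which is
     * what makes the table above exhaustive.
     */
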
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 541b3e23433..7d398f493dd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
#endif
/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+
+/*
* We only update the dirty/accessed state if we set
* the dirty bit by hand in the kernel, since the hardware
* will do the accessed bit for us, and we don't want to
@@ -258,25 +275,54 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
do { \
if (dirty) { \
(ptep)->pte_low = (entry).pte_low; \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
} while (0)
+/*
+ * We don't actually have these, but we want to advertise them so that
+ * we can encompass the flush here.
+ */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_dirty(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval) \
+do { \
+ set_pte_present((vma)->vm_mm, address, ptep, pteval); \
+ flush_tlb_page(vma, address); \
+} while (0)
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep) \
+({ \
+ int __dirty; \
+ __dirty = pte_dirty(*(ptep)); \
+ if (__dirty) { \
+ clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __dirty; \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep) \
+({ \
+ int __young; \
+ __young = pte_young(*(ptep)); \
+ if (__young) { \
+ clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
+ flush_tlb_page(vma, address); \
+ } \
+ __young; \
+})
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
@@ -295,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+ pte_update(mm, addr, ptep);
}
/*
@@ -426,6 +473,13 @@ extern pte_t *lookup_address(unsigned long address);
#define pte_unmap_nested(pte) do { } while (0)
#endif
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, vaddr, ptep); \
+ __flush_tlb_one(vaddr); \
+} while (0)
+
/*
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
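
Note: kpte_clear_flush() pairs a kernel PTE clear with a single-page TLB
flush. Its natural consumer is the atomic kmap teardown path; the sketch
below is modeled on i386 kunmap_atomic() (details assumed from memory,
not part of this diff):

    void kunmap_atomic_sketch(void *kvaddr, enum km_type type)
    {
            unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
            enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();

            /* Only fixmap slots need the PTE torn down; lowmem addresses
             * returned by kmap_atomic() were never remapped at all. */
            if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN + idx))
                    kpte_clear_flush(kmap_pte - idx, vaddr);

            pagefault_enable();     /* leave the atomic kmap section */
    }
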
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 32ac8c91d5c..6aa1206f6e2 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -46,6 +46,8 @@ extern u8 x86_cpu_to_apicid[];
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+extern u8 apicid_2_node[];
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
@@ -82,6 +84,7 @@ static inline int hard_smp_processor_id(void)
#endif
#endif
+extern int safe_smp_processor_id(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern unsigned int num_processors;
@@ -90,6 +93,7 @@ extern unsigned int num_processors;
#else /* CONFIG_SMP */
+#define safe_smp_processor_id() 0
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define NO_PROC_ID 0xFF /* No processor magic marker */
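
Note: safe_smp_processor_id() is for contexts like crash or NMI handling,
where the stack- and per-CPU-based smp_processor_id() may no longer be
trustworthy; it derives the CPU number from the local APIC instead, and on
UP builds collapses to the constant 0. An illustrative caller, modeled on
the i386 crash path (call-site details are assumptions):

    void machine_crash_shutdown_sketch(struct pt_regs *regs)
    {
            /* We may arrive here via NMI with half-trashed state, so ask
             * the hardware which CPU this is rather than trusting it. */
            crashing_cpu = safe_smp_processor_id();

            nmi_shootdown_cpus();   /* park the other processors */
            /* ... save register state, disable the local APIC, etc. ... */
    }
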
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index b0b3043f05e..c18b71fae6b 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -205,4 +205,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
: "+m" (rw->lock) : : "memory");
}
+#define _raw_spin_relax(lock) cpu_relax()
+#define _raw_read_relax(lock) cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
#endif /* __ASM_SPINLOCK_H */
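
Note: these hooks give the generic contended-lock slow path in
kernel/spinlock.c something to call while it spins. On bare i386 they are
plain cpu_relax() (a PAUSE instruction), but a port can turn them into a
directed yield to the lock holder. Expanded from the BUILD_LOCK_OPS macro
of that era, the slow path is roughly (a sketch, not the exact expansion):

    void __lockfunc _spin_lock(spinlock_t *lock)
    {
            for (;;) {
                    preempt_disable();
                    if (likely(_raw_spin_trylock(lock)))
                            break;
                    preempt_enable();

                    if (!(lock)->break_lock)
                            (lock)->break_lock = 1;
                    while (!spin_can_lock(lock) && (lock)->break_lock)
                            _raw_spin_relax(&lock->raw_lock);  /* new hook */
            }
            (lock)->break_lock = 0;
    }
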