Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h          2
-rw-r--r--  include/asm-generic/audit_write.h     2
-rw-r--r--  include/asm-generic/bug.h             8
-rw-r--r--  include/asm-generic/kdebug.h          1
-rw-r--r--  include/asm-generic/memory_model.h    2
-rw-r--r--  include/asm-generic/mutex-dec.h      26
-rw-r--r--  include/asm-generic/mutex-xchg.h      9
-rw-r--r--  include/asm-generic/pgtable.h        50
8 files changed, 66 insertions, 34 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 4ec0a296bde..7abdaa91ccd 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -251,7 +251,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_cmpxchg(l, old, new) \
(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(l), (new)))
+ (atomic_xchg((atomic_t *)(v), (new)))
#endif /* BITS_PER_LONG == 64 */
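For context, a minimal sketch of why the old macro body was broken on configurations that take this branch of the header, assuming the usual <asm/atomic.h> include path; example_xchg and its argument are illustrative, not part of this patch:

#include <asm/atomic.h>

static long example_xchg(atomic_long_t *counter)
{
	/*
	 * Before the fix the macro expanded to
	 * atomic_xchg((atomic_t *)(l), (1)), but no `l` exists at this call
	 * site, so the line failed to compile (or silently picked up an
	 * unrelated variable named `l` that happened to be in scope).
	 */
	return atomic_long_xchg(counter, 1);
}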
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index f10d367fb2a..c5f1c2c920e 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -1,6 +1,8 @@
#include <asm-generic/audit_dir_write.h>
__NR_acct,
+#ifdef __NR_swapon
__NR_swapon,
+#endif
__NR_quotactl,
__NR_truncate,
#ifdef __NR_truncate64
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 12c07c1866b..4c794d73fb8 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -8,9 +8,17 @@
#ifdef CONFIG_GENERIC_BUG
#ifndef __ASSEMBLY__
struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
+#else
+ signed int bug_addr_disp;
+#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
const char *file;
+#else
+ signed int file_disp;
+#endif
unsigned short line;
#endif
unsigned short flags;
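The displacement variant lets 64-bit kernels store 32-bit offsets instead of full pointers in __bug_table, shrinking the table. A rough decode sketch, assuming the displacements are taken relative to the start of the entry (the convention used by architectures that select CONFIG_GENERIC_BUG_RELATIVE_POINTERS); the helper names are illustrative:

#include <linux/bug.h>

static unsigned long bug_entry_addr(const struct bug_entry *bug)
{
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return (unsigned long)bug + bug->bug_addr_disp;
#else
	return bug->bug_addr;
#endif
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
static const char *bug_entry_file(const struct bug_entry *bug)
{
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return (const char *)bug + bug->file_disp;
#else
	return bug->file;
#endif
}
#endif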
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
index 2b799c90b2d..11e57b6a85f 100644
--- a/include/asm-generic/kdebug.h
+++ b/include/asm-generic/kdebug.h
@@ -3,6 +3,7 @@
enum die_val {
DIE_UNUSED,
+ DIE_OOPS=1
};
#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c62aff..18546d8eb78 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(pfn); \
+ unsigned long __nid = arch_pfn_to_nid(__pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
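The one-character change matters because macro arguments are expanded textually: with the old body, the pfn argument was evaluated once for __pfn and a second time inside arch_pfn_to_nid(). A minimal illustration of the hazard (the names below are illustrative):

/* gcc statement expressions, as used by __pfn_to_page() itself */
#define EVALS_TWICE(x)	({ unsigned long __v = (x); __v + (x); })
#define EVALS_ONCE(x)	({ unsigned long __v = (x); __v + __v; })

static unsigned long demo(unsigned long *next_pfn)
{
	/* EVALS_TWICE((*next_pfn)++) would bump the counter twice;
	 * the fixed __pfn_to_page() now behaves like EVALS_ONCE() */
	return EVALS_ONCE((*next_pfn)++);
}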
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be6743..f104af7cf43 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
- else
- smp_mb();
}
/**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
+ return 0;
}
/**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- smp_mb();
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
- }
return 0;
-#else
- return fail_fn(count);
-#endif
}
#endif
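The explicit smp_mb() calls can go because value-returning atomics such as atomic_dec_return(), atomic_inc_return() and atomic_cmpxchg() are already required by the kernel's memory-ordering rules to behave as full barriers, and the trylock now uses atomic_cmpxchg() unconditionally instead of falling back to fail_fn(). A minimal consumer sketch, assuming an architecture whose asm/mutex.h just includes this generic header; the my_* names are illustrative, not kernel symbols:

#include <asm/atomic.h>
#include <asm-generic/mutex-dec.h>

static void my_lock_slowpath(atomic_t *count)
{
	/* illustrative stub: sleep until the current owner releases the mutex */
}

static inline void my_mutex_lock(atomic_t *count)
{
	/* ordering is supplied by atomic_dec_return() itself, so the
	 * fastpath no longer needs an smp_mb() of its own */
	__mutex_fastpath_lock(count, my_lock_slowpath);
}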
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2cbfeb..580a6d35c70 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
fail_fn(count);
- else
- smp_mb();
}
/**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
+ return 0;
}
/**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- smp_mb();
if (unlikely(atomic_xchg(count, 1) != 0))
fail_fn(count);
}
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
if (prev < 0)
prev = 0;
}
- smp_mb();
return prev;
}
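The same reasoning applies to the xchg-based variant: atomic_xchg() already implies a full barrier on both sides. A companion usage sketch under the usual mutex count convention (1 unlocked, 0 locked, negative locked with waiters); the my_* names are illustrative:

#include <asm/atomic.h>
#include <asm-generic/mutex-xchg.h>

static int my_trylock_slowpath(atomic_t *count)
{
	return 0;	/* illustrative: treat contention as "not acquired" */
}

static inline int my_mutex_trylock(atomic_t *count)
{
	/* returns 1 if the mutex was taken on the fast path, 0 otherwise */
	return __mutex_fastpath_trylock(count, my_trylock_slowpath);
}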
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ef87f889ef6..72ebe91005a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
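A typical consumer of this fallback is a driver mapping device memory; a minimal sketch, where my_dev_mmap and the pfn value are illustrative (on architectures that do not override pgprot_writecombine, the define above simply degrades this to an uncached mapping):

#include <linux/fs.h>
#include <linux/mm.h>

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x80000;	/* illustrative device memory base */
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}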
@@ -289,6 +293,52 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
#define arch_flush_lazy_cpu_mode() do {} while (0)
#endif
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+ unsigned long pfn, unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+static inline void untrack_pfn_vma(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+#else
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+ unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_GENERIC_PGTABLE_H */
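For reference, a rough sketch of the call pattern the three hooks above are meant for, pieced together from their comments; the example_* helpers are illustrative, since the real callers live in the generic mm code around remap_pfn_range()/vm_insert_pfn() and the unmap path:

#include <linux/mm.h>

static int example_setup_pfnmap(struct vm_area_struct *vma, unsigned long pfn,
				unsigned long size, pgprot_t prot)
{
	/* a _new_ pfn mapping is being established for [pfn, pfn + size) */
	int err = track_pfn_vma_new(vma, prot, pfn, size);
	if (err)
		return err;
	/* ... install the PTEs ... */
	return 0;
}

static void example_teardown_pfnmap(struct vm_area_struct *vma,
				    unsigned long pfn, unsigned long size)
{
	/* ... clear the PTEs ...; size == 0 means the whole vma */
	untrack_pfn_vma(vma, pfn, size);
}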