Diffstat (limited to 'arch/parisc/include/asm/atomic.h')
-rw-r--r--  arch/parisc/include/asm/atomic.h | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 57fcc4a5ebb..edbfe25c5fc 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif
-/* Note that we need not lock read accesses - aligned word writes/reads
- * are atomic, so a reader never sees unconsistent values.
- *
- * Cache-line alignment would conflict with, for example, linux/module.h
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
  */
-typedef struct { volatile int counter; } atomic_t;
-
 /* It's possible to reduce all atomic operations to either
  * __atomic_add_return, atomic_set and atomic_read (the latter
  * is there only for consistency).
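
For context, the comment kept above summarizes how the rest of this header is structured: reads go unlocked because aligned word accesses are atomic on PA-RISC, while every other operation funnels through __atomic_add_return(). A minimal sketch of that reduction, with simplified bodies (on SMP the real __atomic_add_return() serializes through a hashed spinlock, and atomic_set() takes the same lock so it cannot race a read-modify-write):

    /* Sketch only: names follow the kernel's, bodies are simplified. */
    static __inline__ int atomic_read(const atomic_t *v)
    {
            return v->counter;      /* aligned word read, atomic by itself */
    }

    static __inline__ void atomic_set(atomic_t *v, int i)
    {
            v->counter = i;         /* real version holds the hashed spinlock */
    }

    /* Every arithmetic operation reduces to __atomic_add_return(): */
    #define atomic_add(i, v)        ((void)__atomic_add_return((i), (v)))
    #define atomic_sub(i, v)        ((void)__atomic_add_return(-(i), (v)))
    #define atomic_inc(v)           ((void)__atomic_add_return(1, (v)))
    #define atomic_dec(v)           ((void)__atomic_add_return(-1, (v)))
    #define atomic_inc_return(v)    (__atomic_add_return(1, (v)))
    #define atomic_dec_return(v)    (__atomic_add_return(-1, (v)))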
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #ifdef CONFIG_64BIT
-typedef struct { volatile s64 counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
 static __inline__ int
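
The deleted typedefs do not disappear outright: both hunks remove per-arch copies of atomic_t and atomic64_t, which only makes sense if a single shared definition now lives in a common header. In mainline that consolidation placed them in include/linux/types.h; a sketch of the shared definitions (illustrative, not quoted verbatim from the commit):

    /* Assumed consolidated definitions, roughly as in include/linux/types.h. */
    typedef struct {
            volatile int counter;
    } atomic_t;

    #ifdef CONFIG_64BIT
    typedef struct {
            volatile long counter;  /* parisc previously used volatile s64 */
    } atomic64_t;
    #endif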