From 9a937c91eea31c4b594ea49a2a23c57003e04987 Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 10 Jul 2009 10:04:57 +0900
Subject: powerpc: add dma_capable() to replace is_buffer_dma_capable()

dma_capable() eventually replaces is_buffer_dma_capable(), which tells
whether a memory area is DMA-capable or not. The problem with
is_buffer_dma_capable() is that it doesn't take a pointer to struct
device, so it doesn't work for powerpc.

Signed-off-by: FUJITA Tomonori
Acked-by: Becky Bruce
---
 arch/powerpc/include/asm/dma-mapping.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a..6ff1f8581d7 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -424,6 +424,19 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #endif
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
+		return 0;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	return addr + size <= *dev->dma_mask;
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
--
cgit v1.2.3

From 8d4f5339d1ee4027c07e6b2a1cfa9dc41b0d383b Mon Sep 17 00:00:00 2001
From: FUJITA Tomonori
Date: Fri, 10 Jul 2009 10:05:01 +0900
Subject: x86, IA64, powerpc: add phys_to_dma() and dma_to_phys()

This adds two functions, phys_to_dma() and dma_to_phys(), to x86, IA64
and powerpc; swiotlb uses them. phys_to_dma() converts a physical
address to a DMA address, and dma_to_phys() does the opposite.

Signed-off-by: FUJITA Tomonori
Acked-by: Becky Bruce
---
 arch/powerpc/include/asm/dma-mapping.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 6ff1f8581d7..0c34371ec49 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -437,6 +437,16 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size <= *dev->dma_mask;
 }
 
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr + get_dma_direct_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr - get_dma_direct_offset(dev);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
--
cgit v1.2.3
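Illustrative sketch, not part of either patch above: roughly how a streaming
DMA-mapping path such as swiotlb can combine the two new helpers. The names
example_map_single() and example_bounce_map() are hypothetical and exist only
for this sketch; it assumes the declarations added by the patches above.

static dma_addr_t example_map_single(struct device *dev, void *ptr, size_t size)
{
	phys_addr_t paddr = virt_to_phys(ptr);
	dma_addr_t dma_addr = phys_to_dma(dev, paddr);

	/* If the device can reach the buffer directly, use it as-is. */
	if (dma_capable(dev, dma_addr, size))
		return dma_addr;

	/* Otherwise fall back to a bounce buffer (hypothetical helper). */
	return example_bounce_map(dev, paddr, size);
}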
From 49c794e94649020248e37b78db16cd25bad38b4f Mon Sep 17 00:00:00 2001
From: Jan Engelhardt
Date: Tue, 4 Aug 2009 07:28:28 +0000
Subject: net: implement a SO_PROTOCOL getsockoption

Similar to SO_TYPE returning the socket type, SO_PROTOCOL allows
retrieving the protocol used with a given socket.

I am not quite sure why we have that many copies of socket.h, or why
the values are not the same on all arches, but for the arches where hex
numbers dominate, I use 0x1029 for SO_PROTOCOL, as that seems to be the
next free unused number across a bunch of operating systems, or so
Google results make me want to believe. For the other arches,
SO_PROTOCOL just uses the next free Linux number, 38.

Signed-off-by: Jan Engelhardt
Signed-off-by: David S. Miller
---
 arch/powerpc/include/asm/socket.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 1e5cfad0e3f..609049d7117 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -64,4 +64,6 @@
 #define SO_TIMESTAMPING		37
 #define SCM_TIMESTAMPING	SO_TIMESTAMPING
 
+#define SO_PROTOCOL		38
+
 #endif	/* _ASM_POWERPC_SOCKET_H */
--
cgit v1.2.3

From 0d6038ee76f2e06b79d0465807f67e86bf4025de Mon Sep 17 00:00:00 2001
From: Jan Engelhardt
Date: Tue, 4 Aug 2009 07:28:29 +0000
Subject: net: implement a SO_DOMAIN getsockoption
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This sockopt goes in line with SO_TYPE and SO_PROTOCOL. It makes it
possible for userspace programs to pass around file descriptors — I am
referring to arguments-to-functions, but it may even work for fd
passing over UNIX sockets — without needing to also pass the auxiliary
information (PF_INET6/IPPROTO_TCP).

Signed-off-by: Jan Engelhardt
Signed-off-by: David S. Miller
---
 arch/powerpc/include/asm/socket.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 609049d7117..3ab8b3e6feb 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -65,5 +65,6 @@
 #define SCM_TIMESTAMPING	SO_TIMESTAMPING
 
 #define SO_PROTOCOL		38
+#define SO_DOMAIN		39
 
 #endif	/* _ASM_POWERPC_SOCKET_H */
--
cgit v1.2.3
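A brief userspace illustration of the two new options (not part of the
patches): both are queried with plain getsockopt() at the SOL_SOCKET level and
return an int. This assumes a libc whose headers already define SO_PROTOCOL
and SO_DOMAIN, which were new at the time of these patches.

#include <stdio.h>
#include <sys/socket.h>

/* Print the domain and protocol of an already-open socket fd. */
static int print_sock_identity(int fd)
{
	int domain, protocol;
	socklen_t len;

	len = sizeof(domain);
	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len) < 0)
		return -1;

	len = sizeof(protocol);
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len) < 0)
		return -1;

	printf("domain=%d protocol=%d\n", domain, protocol);
	return 0;
}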
From 1660e9d3d04b6c636b7171bf6c08ac7b82a7de79 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Mon, 17 Aug 2009 14:36:32 +1000
Subject: powerpc/32: Always order writes to halves of 64-bit PTEs

On 32-bit systems with 64-bit PTEs, the PTEs have to be written in two
32-bit halves. On SMP we write the higher-order half and then the
lower-order half, with a write barrier between the two halves, but on
UP there was no particular ordering of the writes to the two halves.

This extends the ordering that we already do on SMP to the UP case as
well. The reason is that, with the perf_counter subsystem potentially
accessing user memory at interrupt time to get stack traces, we have to
be careful not to create an incorrect but apparently valid PTE even on
UP.

Acked-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/pgtable.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da78112..2a5da069714 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	else
 		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #else
 	/* Anything else just stores the PTE normally. That covers all 64-bit
-	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
 #endif
--
cgit v1.2.3

From ed24157ede901608e00f28b4897398a373e1e926 Mon Sep 17 00:00:00 2001
From: Anton Vorontsov
Date: Thu, 27 Aug 2009 07:35:50 +0000
Subject: powerpc/qe: Implement qe_alive_during_sleep() helper function

In some CPUs (e.g. the MPC8569) the QE shuts down completely during
sleep, so drivers may want to know that in order to reinitialize their
registers and buffer descriptors. This patch implements the
qe_alive_during_sleep() helper function; so far it just checks whether
an MPC8569-compatible power management controller is present, which is
a sign that the QE turns off during sleep.

Signed-off-by: Anton Vorontsov
Signed-off-by: David S. Miller
---
 arch/powerpc/include/asm/qe.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 157c5ca581c..f388f0ab193 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -154,6 +154,7 @@ int qe_get_snum(void);
 void qe_put_snum(u8 snum);
 unsigned int qe_get_num_of_risc(void);
 unsigned int qe_get_num_of_snums(void);
+int qe_alive_during_sleep(void);
 
 /* we actually use cpm_muram implementation, define this for convenience */
 #define qe_muram_init cpm_muram_init
--
cgit v1.2.3
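Illustrative sketch of the intended use, not part of the patch: a QE-based
driver's resume path can consult the new helper to decide whether the QE state
was lost across sleep and must be rebuilt. The names example_qe_resume() and
example_reinit_qe() are hypothetical; this assumes asm/qe.h is included.

static int example_qe_resume(struct device *dev)
{
	/* If the QE was powered off during sleep, its registers and
	 * buffer descriptors are gone and must be reprogrammed. */
	if (!qe_alive_during_sleep())
		example_reinit_qe(dev);

	return 0;
}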
From 8307a98097222f4d9c2e62ebccd6f5df439328de Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 31 Aug 2009 14:43:31 +0200
Subject: locking, powerpc: Rename __spin_try_lock() and friends

Needed to avoid namespace conflicts when the common code function
bodies of _spin_try_lock() etc. are moved to a header file where the
function name would be __spin_try_lock().

Signed-off-by: Heiko Carstens
Acked-by: Peter Zijlstra
Cc: Arnd Bergmann
Cc: Nick Piggin
Cc: Martin Schwidefsky
Cc: Horst Hartmann
Cc: Christian Ehrhardt
Cc: Andrew Morton
Cc: Linus Torvalds
Cc: David Miller
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: Geert Uytterhoeven
Cc: Roman Zippel
Cc:
LKML-Reference: <20090831124415.918799705@de.ibm.com>
Signed-off-by: Ingo Molnar
---
 arch/powerpc/include/asm/spinlock.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'arch/powerpc/include/asm')

diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f8..198266cf9e2 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
  * This returns the old value in the lock, so we succeeded
  * in getting the lock if the return value is 0.
  */
-static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
 {
 	unsigned long tmp, token;
 
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
-	return __spin_trylock(lock) == 0;
+	return arch_spin_trylock(lock) == 0;
 }
 
 /*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		do {
 			HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 
 	CLEAR_IO_SYNC;
 	while (1) {
-		if (likely(__spin_trylock(lock) == 0))
+		if (likely(arch_spin_trylock(lock) == 0))
 			break;
 		local_save_flags(flags_dis);
 		local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
  * This returns the old value in the lock + 1,
  * so we got a read lock if the return value is > 0.
  */
-static inline long __read_trylock(raw_rwlock_t *rw)
+static inline long arch_read_trylock(raw_rwlock_t *rw)
 {
 	long tmp;
 
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
  * This returns the old value in the lock,
  * so we got the write lock if the return value is 0.
  */
-static inline long __write_trylock(raw_rwlock_t *rw)
+static inline long arch_write_trylock(raw_rwlock_t *rw)
 {
 	long tmp, token;
 
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__read_trylock(rw) > 0))
+		if (likely(arch_read_trylock(rw) > 0))
 			break;
 		do {
 			HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	while (1) {
-		if (likely(__write_trylock(rw) == 0))
+		if (likely(arch_write_trylock(rw) == 0))
 			break;
 		do {
 			HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 
 static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return __read_trylock(rw) > 0;
+	return arch_read_trylock(rw) > 0;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	return __write_trylock(rw) == 0;
+	return arch_write_trylock(rw) == 0;
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
--
cgit v1.2.3