From 25d6e2d7c58ddc4a3b614fc5381591c0cfe66556 Mon Sep 17 00:00:00 2001
From: Mark Nelson
Date: Mon, 27 Oct 2008 00:46:51 +0000
Subject: powerpc: Update 64bit memcpy() using CPU_FTR_UNALIGNED_LD_STD

Update memcpy() to add two new feature sections: one for aligning the
destination before copying, and one for copying using aligned
doubleword loads and stores. The new feature sections only affect
Power6 and Cell, because the CPU feature bit was only added to those
two processors.

Power6 gets its best memcpy() performance when neither the source nor
the destination is aligned, while Cell does best when just the
destination is aligned. To save on CPU feature bits we reuse the
previously added CPU_FTR_CP_USE_DCBTZ bit to differentiate between
Power6 and Cell (CPU_FTR_CP_USE_DCBTZ was added for Cell but not for
Power6).

The first feature section nops out the branch that takes us to the
code that aligns the destination to an eight-byte boundary; we only
want that on Power6. Its ALT_FTR_SECTION_END() therefore tests the
two feature bits ORed together against an expected value of just
CPU_FTR_UNALIGNED_LD_STD, so the branch is nopped out only on a CPU
that has CPU_FTR_UNALIGNED_LD_STD set and CPU_FTR_CP_USE_DCBTZ clear.

For the second feature section: on a CPU with CPU_FTR_UNALIGNED_LD_STD
set we don't want to do the copy with aligned loads and stores (and
the accompanying left and right shifts), so we nop out the branch to
.Lsrc_unaligned. The andi. that feeds this branch is moved to just
above it, so that both instructions can be nopped out by a single
feature section; this performs better than two separate feature
sections did, and reads more clearly. Moving the andi. has no
noticeable negative effect on the remaining 64-bit processors (those
that didn't get this feature bit).

On Cell this simple modification improves measured memcpy() bandwidth
by up to 50% in the hot-cache case and up to 15% in the cold-cache
case. On Power6, memory bandwidth is up to three times higher in the
hot-cache case and up to 50% higher in the cold-cache case.

CPU_FTR_CP_USE_DCBTZ was added in commit
2a9294369bd020db89bfdf78b84c3615b39a5c84 ("powerpc: Add new CPU
feature: CPU_FTR_CP_USE_DCBTZ").

Saying that Cell does best in memcpy() with just the destination
aligned is true, but only because the indirect shift instructions,
sld and srd, are microcoded on Cell. That means either the destination
or the source can be aligned, but not both; since we measure better
performance with the destination aligned, that is the option we
choose.

While we're at it, make a one-line change from cmpldi r1,... to
cmpldi cr1,... for consistency.

Signed-off-by: Mark Nelson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/memcpy_64.S | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/lib')

diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 3f131129d1c..fe2d34e5332 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -18,11 +18,23 @@ _GLOBAL(memcpy)
 	andi.	r6,r6,7
 	dcbt	0,r4
 	blt	cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+   CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+   cleared.
+   At the time of writing the only CPU that has this combination of bits
+   set is Power6. */
+BEGIN_FTR_SECTION
+	nop
+FTR_SECTION_ELSE
 	bne	.Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+		    CPU_FTR_UNALIGNED_LD_STD)
 .Ldst_aligned:
-	andi.	r0,r4,7
 	addi	r3,r3,-16
+BEGIN_FTR_SECTION
+	andi.	r0,r4,7
 	bne	.Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	srdi	r7,r5,4
 	ld	r9,0(r4)
 	addi	r4,r4,-8
@@ -131,7 +143,7 @@ _GLOBAL(memcpy)
 	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
 	subf	r5,r6,r5
 	li	r7,0
-	cmpldi	r1,r5,16
+	cmpldi	cr1,r5,16
 	bf	cr7*4+3,1f
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
--
cgit v1.2.3
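The mask/value pair passed to ALT_FTR_SECTION_END() above is compact
but easy to misread. A minimal C sketch of the decision it encodes at
feature-fixup time; the bit values here are illustrative, not the
kernel's real ones:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative bit assignments; the real values live in the
     * powerpc cputable definitions. */
    #define CPU_FTR_CP_USE_DCBTZ        (1ULL << 0)
    #define CPU_FTR_UNALIGNED_LD_STD    (1ULL << 1)

    /* Should "bne .Ldst_unaligned" be replaced with a nop?  Only when
     * CPU_FTR_UNALIGNED_LD_STD is set and CPU_FTR_CP_USE_DCBTZ is
     * clear, i.e. on Power6 but not on Cell. */
    static bool nop_dst_align_branch(uint64_t cpu_features)
    {
            const uint64_t mask  = CPU_FTR_UNALIGNED_LD_STD |
                                   CPU_FTR_CP_USE_DCBTZ;
            const uint64_t value = CPU_FTR_UNALIGNED_LD_STD;

            return (cpu_features & mask) == value;
    }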
From 7526ff76f84178f8c926de7e590e4c5f9d4a2e62 Mon Sep 17 00:00:00 2001
From: Hollis Blanchard
Date: Mon, 10 Nov 2008 14:33:36 +0000
Subject: powerpc: Remove superfluous WARN_ON() from dma-noncoherent.c

I can't tell why this WARN_ON exists, and there's no comment
explaining it. Whether the pmd is present or not, pte_alloc_kernel()
seems to handle both cases.

Booting a 440 kernel with a 64K PAGE_SIZE triggers the warning, but
booting completes successfully and I see no problems beyond that.

Signed-off-by: Hollis Blanchard
Acked-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/dma-noncoherent.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/powerpc/lib')

diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 31734c0969c..2b1ce184934 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -320,7 +320,6 @@ static int __init dma_alloc_init(void)
 			ret = -ENOMEM;
 			break;
 		}
-		WARN_ON(!pmd_none(*pmd));
 
 		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
 		if (!pte) {
--
cgit v1.2.3

From a4e22f02f5b6518c1484faea1f88d81802b9feac Mon Sep 17 00:00:00 2001
From: Mark Nelson
Date: Tue, 11 Nov 2008 00:53:34 +0000
Subject: powerpc: Update 64bit __copy_tofrom_user() using CPU_FTR_UNALIGNED_LD_STD

Update __copy_tofrom_user() with the same new feature sections that
were added to memcpy() in commit
25d6e2d7c58ddc4a3b614fc5381591c0cfe66556 ("powerpc: Update 64bit
memcpy() using CPU_FTR_UNALIGNED_LD_STD").

Once again this is purely a performance tweak for Cell and Power6; it
has no effect on any of the other 64-bit powerpc chips. We can make
the same changes to __copy_tofrom_user() because the basic copy
algorithm is the same as in memcpy() - this version just adds the
exception handling logic needed when copying to or from userspace,
plus a special case for copying whole page-aligned 4K pages.

The CPU_FTR_UNALIGNED_LD_STD CPU feature was added in commit
4ec577a28980a0790df3c3dfe9c81f6e2222acfb ("powerpc: Add new CPU
feature: CPU_FTR_UNALIGNED_LD_STD").

We also make the same simple one-line change from cmpldi r1,... to
cmpldi cr1,... for consistency.

Signed-off-by: Mark Nelson
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/copyuser_64.S | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/lib')

diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 25ec5378afa..70693a5c12a 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user)
 	andi.	r6,r6,7
 	PPC_MTOCRF	0x01,r5
 	blt	cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ * cleared.
+ * At the time of writing the only CPU that has this combination of bits
+ * set is Power6.
+ */
+BEGIN_FTR_SECTION
+	nop
+FTR_SECTION_ELSE
 	bne	.Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+		    CPU_FTR_UNALIGNED_LD_STD)
 .Ldst_aligned:
-	andi.	r0,r4,7
 	addi	r3,r3,-16
+BEGIN_FTR_SECTION
+	andi.	r0,r4,7
 	bne	.Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	srdi	r7,r5,4
 20:	ld	r9,0(r4)
 	addi	r4,r4,-8
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user)
 	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
 	subf	r5,r6,r5
 	li	r7,0
-	cmpldi	r1,r5,16
+	cmpldi	cr1,r5,16
 	bf	cr7*4+3,1f
 35:	lbz	r0,0(r4)
 81:	stb	r0,0(r3)
--
cgit v1.2.3
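Both this patch and the memcpy() one skip the same .Lsrc_unaligned
machinery on Power6. The technique being skipped does only aligned
doubleword loads and splices neighbouring words together with shifts
(the sld and srd that the memcpy() commit notes are microcoded on
Cell). A big-endian C sketch of that inner loop, assuming dst is
8-byte aligned, src is misaligned by 1 to 7 bytes, and n is a multiple
of 8; unlike the real assembly, this sketch may read up to 7 bytes
past the end of src:

    #include <stddef.h>
    #include <stdint.h>

    static void shift_copy_be(uint64_t *dst, const unsigned char *src,
                              size_t n)
    {
            size_t off = (uintptr_t)src & 7;  /* misalignment, 1..7 */
            const uint64_t *s = (const uint64_t *)(src - off);
            uint64_t w0 = *s++;               /* aligned doubleword load */

            for (; n >= 8; n -= 8) {
                    uint64_t w1 = *s++;       /* aligned doubleword load */
                    /* sld/srd equivalent: the low (8 - off) bytes of w0,
                     * then the high off bytes of w1 */
                    *dst++ = (w0 << (8 * off)) | (w1 >> (64 - 8 * off));
                    w0 = w1;
            }
    }

Power6 handles unaligned ld/std in hardware, so skipping this loop
(two extra shifts and an or per doubleword) is a win there.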
From 8168b5400b06353293f9844976435886eb2a8ff2 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Thu, 11 Dec 2008 02:53:54 +0000
Subject: powerpc: Rename struct vm_region to avoid conflict with NOMMU

Rename PowerPC's struct vm_region so that I can introduce my own
global version for NOMMU. It's feasible that the PowerPC version may
wish to use my global one instead.

The NOMMU vm_region struct defines areas of the physical memory map
that are under mmap. This may include chunks of RAM or regions of
memory-mapped devices, such as flash. It is also used to retain copies
of file content so that shareable private memory mappings of files can
be made. As such, it may be compatible with what is described in the
banner comment for PowerPC's vm_region struct.

Signed-off-by: David Howells
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/dma-noncoherent.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

(limited to 'arch/powerpc/lib')

diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 2b1ce184934..b7dc4c19f58 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct ppc_vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
 };
 
-static struct vm_region consistent_head = {
+static struct ppc_vm_region consistent_head = {
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct ppc_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_start == addr)
@@ -151,7 +151,7 @@ void *
 __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 	unsigned long order;
 	u64 mask = 0x00ffffff, limit; /* ISA default */
 
@@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = ppc_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		unsigned long vaddr = c->vm_start;
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
  */
 void __dma_free_coherent(size_t size, void *vaddr)
 {
-	struct vm_region *c;
+	struct ppc_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 
@@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
 
 	spin_lock_irqsave(&consistent_lock, flags);
 
-	c = vm_region_find(&consistent_head, (unsigned long)vaddr);
+	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
 	if (!c)
 		goto no_area;
--
cgit v1.2.3
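The rename above is purely internal: the two exported entry points
keep the signatures visible in the hunks. A hedged sketch of how a
caller pairs them (the buffer size and flags are made up for
illustration):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    /* Allocate and free a buffer from the consistent mapping region
     * via the helpers whose internals were renamed above. */
    static int example_dma_buffer(void)
    {
            dma_addr_t handle;
            void *vaddr;

            vaddr = __dma_alloc_coherent(4096, &handle, GFP_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            /* ... program the device with handle; the CPU uses vaddr ... */

            __dma_free_coherent(4096, vaddr);
            return 0;
    }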