From 94bbc1763b6b6d20d5cfa70c41cda23af27f8b55 Mon Sep 17 00:00:00 2001 From: Bernhard R Link Date: Fri, 10 Mar 2006 01:23:13 -0800 Subject: [SPARC64]: fix sparc_floppy_irq's auxio_register resetting The patch "[SPARC64]: Get rid of fast IRQ feature" moved the code from arch/sparc64/kernel/entry.S: lduba [%g7] ASI_PHYS_BYPASS_EC_E, %g5 or %g5, AUXIO_AUX1_FTCNT, %g5 stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E andn %g5, AUXIO_AUX1_FTCNT, %g5 stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E to arch/sparc64/kernel/irq.c: val = readb(auxio_register); val |= AUXIO_AUX1_FTCNT; writeb(val, auxio_register); val &= AUXIO_AUX1_FTCNT; writeb(val, auxio_register); This looks like it is missing a bitwise not, which is reintroduced by this patch. Due to lack of a floppy device, I could not test it, but it looks evident. Signed-off-by: Bernhard R Link Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 233526ba3ab..f7490ef629b 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -694,7 +694,7 @@ irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs) val = readb(auxio_register); val |= AUXIO_AUX1_FTCNT; writeb(val, auxio_register); - val &= AUXIO_AUX1_FTCNT; + val &= ~AUXIO_AUX1_FTCNT; writeb(val, auxio_register); doing_pdma = 0; -- cgit v1.2.3 From 30d4d1ffed7098afe2641536d67eef150499da02 Mon Sep 17 00:00:00 2001 From: Eric Sesterhenn Date: Fri, 10 Mar 2006 02:55:20 -0800 Subject: [SPARC]: BUG_ON() Conversion in arch/sparc/kernel/ioport.c This changes if() BUG(); constructs to BUG_ON(), which is cleaner and can be better optimized away. Signed-off-by: Eric Sesterhenn Signed-off-by: David S. Miller --- arch/sparc/kernel/ioport.c | 40 +++++++++++++++------------------------- 1 file changed, 15 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index d39c9f20627..460f72e640e 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -217,7 +217,7 @@ static void _sparc_free_io(struct resource *res) unsigned long plen; plen = res->end - res->start + 1; - if ((plen & (PAGE_SIZE-1)) != 0) BUG(); + BUG_ON((plen & (PAGE_SIZE-1)) != 0); sparc_unmapiorange(res->start, plen); release_resource(res); } @@ -512,8 +512,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ return virt_to_phys(ptr); } @@ -528,8 +527,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); @@ -542,8 +540,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing.
*/ return page_to_phys(page) + offset; } @@ -551,8 +548,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); /* mmu_inval_dma_area XXX */ } @@ -576,11 +572,10 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, { int n; - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ for (n = 0; n < nents; n++) { - if (page_address(sg->page) == NULL) BUG(); + BUG_ON(page_address(sg->page) == NULL); sg->dvma_address = virt_to_phys(page_address(sg->page)); sg->dvma_length = sg->length; sg++; @@ -597,11 +592,10 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, { int n; - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for (n = 0; n < nents; n++) { - if (page_address(sg->page) == NULL) BUG(); + BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); @@ -622,8 +616,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, */ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); @@ -632,8 +625,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) { - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); @@ -650,11 +642,10 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int { int n; - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for (n = 0; n < nents; n++) { - if (page_address(sg->page) == NULL) BUG(); + BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); @@ -667,11 +658,10 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, i { int n; - if (direction == PCI_DMA_NONE) - BUG(); + BUG_ON(direction == PCI_DMA_NONE); if (direction != PCI_DMA_TODEVICE) { for (n = 0; n < nents; n++) { - if (page_address(sg->page) == NULL) BUG(); + BUG_ON(page_address(sg->page) == NULL); mmu_inval_dma_area( (unsigned long) page_address(sg->page), (sg->length + PAGE_SIZE-1) & PAGE_MASK); -- cgit v1.2.3 From 74bf4312fff083ab25c3f357cc653ada7995e5f6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:29:18 -0800 Subject: [SPARC64]: Move away from virtual page tables, part 1. We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken; we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size.
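
In C terms, the direct-mapped TSB lookup that the new miss handlers perform is roughly the following (an illustrative sketch with hypothetical names, assuming the 8K TSB of 16-byte {tag, pte} entries this series introduces; the real hash and tag layout appear in the new arch/sparc64/mm/tsb.c further below):

	struct tsb {
		unsigned long tag;	/* (vaddr >> 22) | (context << 48) */
		unsigned long pte;
	};

	#define TSB_NENTRIES	(8192 / sizeof(struct tsb))	/* 512 slots in an 8K TSB */

	/* Direct-mapped: each virtual page hashes to exactly one slot.
	 * PAGE_SHIFT is 13 for the 8K base page size assumed here.
	 */
	static unsigned long tsb_lookup(struct tsb *tsb, unsigned long vaddr,
					unsigned long context)
	{
		struct tsb *ent = &tsb[(vaddr >> PAGE_SHIFT) & (TSB_NENTRIES - 1)];

		if (ent->tag == ((vaddr >> 22) | (context << 48)))
			return ent->pte;	/* hit: load this straight into the TLB */
		return 0UL;			/* miss: fall back to a page table walk */
	}

The hardware assist is that the MMU precomputes the entry pointer for a faulting address (the ASI_DMMU_TSB_8KB_PTR/ASI_IMMU_TSB_8KB_PTR reads in the new dtlb_miss.S and itlb_miss.S), so the hot path is one quad load, a tag compare, and a TLB data-in store.
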
Also noticed that flush_tlb_all() is not referenced anywhere; only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). The kernel gets its own 8KB TSB (swapper_tsb) and each address space gets its own private 8K TSB. Later we can add code to dynamically increase the size of the per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller --- arch/sparc64/kernel/Makefile | 2 +- arch/sparc64/kernel/binfmt_aout32.c | 13 +- arch/sparc64/kernel/dtlb_backend.S | 170 ----------------------- arch/sparc64/kernel/dtlb_base.S | 109 --------------- arch/sparc64/kernel/dtlb_miss.S | 39 ++++++ arch/sparc64/kernel/etrap.S | 2 + arch/sparc64/kernel/head.S | 46 +------ arch/sparc64/kernel/itlb_base.S | 79 ----------- arch/sparc64/kernel/itlb_miss.S | 39 ++++++ arch/sparc64/kernel/ktlb.S | 263 +++++++++++++++++------------------- arch/sparc64/kernel/process.c | 25 +--- arch/sparc64/kernel/rtrap.S | 6 +- arch/sparc64/kernel/smp.c | 12 +- arch/sparc64/kernel/trampoline.S | 33 ----- arch/sparc64/kernel/tsb.S | 169 +++++++++++++++++++++++ arch/sparc64/kernel/ttable.S | 6 +- arch/sparc64/kernel/vmlinux.lds.S | 3 + arch/sparc64/kernel/winfixup.S | 8 +- arch/sparc64/mm/Makefile | 2 +- arch/sparc64/mm/init.c | 91 +------------ arch/sparc64/mm/tlb.c | 61 +-------- arch/sparc64/mm/tsb.c | 84 ++++++++++++ arch/sparc64/mm/ultra.S | 58 -------- 23 files changed, 489 insertions(+), 831 deletions(-) delete mode 100644 arch/sparc64/kernel/dtlb_backend.S delete mode 100644 arch/sparc64/kernel/dtlb_base.S create mode 100644 arch/sparc64/kernel/dtlb_miss.S delete mode 100644 arch/sparc64/kernel/itlb_base.S create mode 100644 arch/sparc64/kernel/itlb_miss.S create mode 100644 arch/sparc64/kernel/tsb.S create mode 100644 arch/sparc64/mm/tsb.c (limited to 'arch') diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 83d67eb1889..a482a9ffe5b 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -38,5 +38,5 @@ else CMODEL_CFLAG := -m64 -mcmodel=medlow endif -head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \ +head.o: head.S ttable.S itlb_miss.S dtlb_miss.S ktlb.S tsb.S \ etrap.S rtrap.S winfixup.S entry.S diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index 202a80c24b6..a57d7f2b6f1 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c @@ -31,6 +31,7 @@ #include #include #include +#include static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs); static int load_aout32_library(struct file*); @@ -329,15 +330,9 @@ beyond_if: current->mm->start_stack = (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); - if (!(orig_thr_flags & _TIF_32BIT)) { - unsigned long pgd_cache = get_pgd_cache(current->mm->pgd); - - __asm__ __volatile__("stxa\t%0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (pgd_cache), - "r" (TSB_REG), "i" (ASI_DMMU)); - } + tsb_context_switch(__pa(current->mm->pgd), + current->mm->context.sparc64_tsb); + start_thread32(regs, ex.a_entry, current->mm->start_stack); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); diff --git
a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S deleted file mode 100644 index acc889a7f9c..00000000000 --- a/arch/sparc64/kernel/dtlb_backend.S +++ /dev/null @@ -1,170 +0,0 @@ -/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $ - * dtlb_backend.S: Back end to DTLB miss replacement strategy. - * This is included directly into the trap table. - * - * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) - * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) - */ - -#include -#include - -#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS) - -#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P ) -#define VPTE_SHIFT (PAGE_SHIFT - 3) - -/* Ways we can get here: - * - * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1. - * 2) Nucleus loads and stores to/from user/kernel window save areas. - * 3) VPTE misses from dtlb_base and itlb_base. - * - * We need to extract out the PMD and PGDIR indexes from the - * linear virtual page table access address. The PTE index - * is at the bottom, but we are not concerned with it. Bits - * 0 to 2 are clear since each PTE is 8 bytes in size. Each - * PMD and PGDIR entry are 4 bytes in size. Thus, this - * address looks something like: - * - * |---------------------------------------------------------------| - * | ... | PGDIR index | PMD index | PTE index | | - * |---------------------------------------------------------------| - * 63 F E D C B A 3 2 0 <- bit nr - * - * The variable bits above are defined as: - * A --> 3 + (PAGE_SHIFT - log2(8)) - * --> 3 + (PAGE_SHIFT - 3) - 1 - * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1) - * B --> A + 1 - * C --> B + (PAGE_SHIFT - log2(4)) - * --> B + (PAGE_SHIFT - 2) - 1 - * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1) - * D --> C + 1 - * E --> D + (PAGE_SHIFT - log2(4)) - * --> D + (PAGE_SHIFT - 2) - 1 - * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1) - * F --> E + 1 - * - * (Note how "B" always evalutes to PAGE_SHIFT, all the other constants - * cancel out.) - * - * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are: - * A --> 12 - * B --> 13 - * C --> 23 - * D --> 24 - * E --> 34 - * F --> 35 - * - * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are: - * A --> 15 - * B --> 16 - * C --> 29 - * D --> 30 - * E --> 43 - * F --> 44 - * - * Because bits both above and below each PGDIR and PMD index need to - * be masked out, and the index can be as long as 14 bits (when using a - * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions - * to extract each index out. - * - * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so - * we try to avoid using them for the entire operation. We could setup - * a mask anywhere from bit 31 down to bit 10 using the sethi instruction. - * - * We need a mask covering bits B --> C and one covering D --> E. - * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000. - * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000. - * The second in each set cannot be loaded with a single sethi - * instruction, because the upper bits are past bit 32. We would - * need to use a sethi + a shift. - * - * For the time being, we use 2 shifts and a simple "and" mask. - * We shift left to clear the bits above the index, we shift down - * to clear the bits below the index (sans the log2(4 or 8) bits) - * and a mask to clear the log2(4 or 8) bits. 
We need therefore - * define 4 shift counts, all of which are relative to PAGE_SHIFT. - * - * Although unsupportable for other reasons, this does mean that - * 512K and 4MB page sizes would be generaally supported by the - * kernel. (ELF binaries would break with > 64K PAGE_SIZE since - * the sections are only aligned that strongly). - * - * The operations performed for extraction are thus: - * - * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3 - * - */ - -#define A (3 + (PAGE_SHIFT - 3) - 1) -#define B (A + 1) -#define C (B + (PAGE_SHIFT - 2) - 1) -#define D (C + 1) -#define E (D + (PAGE_SHIFT - 2) - 1) -#define F (E + 1) - -#define PMD_SHIFT_LEFT (64 - D) -#define PMD_SHIFT_RIGHT (64 - (D - B) - 2) -#define PGDIR_SHIFT_LEFT (64 - F) -#define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2) -#define LOW_MASK_BITS 0x3 - -/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */ - ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS - add %g3, %g3, %g5 ! Compute VPTE base - cmp %g4, %g5 ! VPTE miss? - bgeu,pt %xcc, 1f ! Continue here - andcc %g4, TAG_CONTEXT_BITS, %g5 ! tl0 miss Nucleus test - ba,a,pt %xcc, from_tl1_trap ! Fall to tl0 miss -1: sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS - or %g4, %g5, %g4 ! Prepare TAG_ACCESS - -/* TLB1 ** ICACHE line 2: Quick VPTE miss */ - mov TSB_REG, %g1 ! Grab TSB reg - ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching? - sllx %g6, PMD_SHIFT_LEFT, %g1 ! Position PMD offset - be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus? - srlx %g1, PMD_SHIFT_RIGHT, %g1 ! Mask PMD offset bits - brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke - andn %g1, LOW_MASK_BITS, %g1 ! Final PMD mask - sllx %g6, PGDIR_SHIFT_LEFT, %g5 ! Position PGD offset - -/* TLB1 ** ICACHE line 3: Quick VPTE miss */ - srlx %g5, PGDIR_SHIFT_RIGHT, %g5 ! Mask PGD offset bits - andn %g5, LOW_MASK_BITS, %g5 ! Final PGD mask - lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD - brz,pn %g5, vpte_noent ! Valid? -sparc64_kpte_continue: - sllx %g5, 11, %g5 ! Shift into place -sparc64_vpte_continue: - lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD - sllx %g5, 11, %g5 ! Shift into place - brz,pn %g5, vpte_noent ! Valid? - -/* TLB1 ** ICACHE line 4: Quick VPTE miss */ - mov (VALID_SZ_BITS >> 61), %g1 ! upper vpte into %g1 - sllx %g1, 61, %g1 ! finish calc - or %g5, VPTE_BITS, %g5 ! Prepare VPTE data - or %g5, %g1, %g5 ! ... - mov TLB_SFSR, %g1 ! Restore %g1 value - stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load VPTE into TLB - stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS - retry ! Load PTE once again - -#undef VALID_SZ_BITS -#undef VPTE_SHIFT -#undef VPTE_BITS -#undef A -#undef B -#undef C -#undef D -#undef E -#undef F -#undef PMD_SHIFT_LEFT -#undef PMD_SHIFT_RIGHT -#undef PGDIR_SHIFT_LEFT -#undef PGDIR_SHIFT_RIGHT -#undef LOW_MASK_BITS - diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S deleted file mode 100644 index 6528786840c..00000000000 --- a/arch/sparc64/kernel/dtlb_base.S +++ /dev/null @@ -1,109 +0,0 @@ -/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $ - * dtlb_base.S: Front end to DTLB miss replacement strategy. - * This is included directly into the trap table. - * - * Copyright (C) 1996,1998 David S. 
Miller (davem@redhat.com) - * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) - */ - -#include -#include - -/* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS) - * %g2 (KERN_HIGHBITS | KERN_LOWBITS) - * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space) - * (0xffe0000000000000) Cheetah (64-bit VA space) - * %g7 __pa(current->mm->pgd) - * - * The VPTE base value is completely magic, but note that - * few places in the kernel other than these TLB miss - * handlers know anything about the VPTE mechanism or - * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD). - * Consider the 44-bit VADDR Ultra-I/II case as an example: - * - * VA[0 : (1<<43)] produce VPTE index [%g3 : 0] - * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3] - * - * For Cheetah's 64-bit VADDR space this is: - * - * VA[0 : (1<<63)] produce VPTE index [%g3 : 0] - * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3] - * - * If you're paying attention you'll notice that this means half of - * the VPTE table is above %g3 and half is below, low VA addresses - * map progressively upwards from %g3, and high VA addresses map - * progressively upwards towards %g3. This trick was needed to make - * the same 8 instruction handler work both for Spitfire/Blackbird's - * peculiar VA space hole configuration and the full 64-bit VA space - * one of Cheetah at the same time. - */ - -/* Ways we can get here: - * - * 1) Nucleus loads and stores to/from PA-->VA direct mappings. - * 2) Nucleus loads and stores to/from vmalloc() areas. - * 3) User loads and stores. - * 4) User space accesses by nucleus at tl0 - */ - -#if PAGE_SHIFT == 13 -/* - * To compute vpte offset, we need to do ((addr >> 13) << 3), - * which can be optimized to (addr >> 10) if bits 10/11/12 can - * be guaranteed to be 0 ... mmu_context.h does guarantee this - * by only using 10 bits in the hwcontext value. - */ -#define CREATE_VPTE_OFFSET1(r1, r2) nop -#define CREATE_VPTE_OFFSET2(r1, r2) \ - srax r1, 10, r2 -#else -#define CREATE_VPTE_OFFSET1(r1, r2) \ - srax r1, PAGE_SHIFT, r2 -#define CREATE_VPTE_OFFSET2(r1, r2) \ - sllx r2, 3, r2 -#endif - -/* DTLB ** ICACHE line 1: Quick user TLB misses */ - mov TLB_SFSR, %g1 - ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS - andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus? -from_tl1_trap: - rdpr %tl, %g5 ! For TL==3 test - CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset - be,pn %xcc, kvmap ! Yep, special processing - CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset - cmp %g5, 4 ! Last trap level? - -/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */ - be,pn %xcc, longpath ! Yep, cannot risk VPTE miss - nop ! delay slot - ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE -1: brgez,pn %g5, longpath ! Invalid, branch out - nop ! Delay-slot -9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB - retry ! Trap return - nop - -/* DTLB ** ICACHE line 3: winfixups+real_faults */ -longpath: - rdpr %pstate, %g5 ! Move into alternate globals - wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate - rdpr %tl, %g4 ! See where we came from. - cmp %g4, 1 ! Is etrap/rtrap window fault? - mov TLB_TAG_ACCESS, %g4 ! Prepare for fault processing - ldxa [%g4] ASI_DMMU, %g5 ! Load faulting VA page - be,pt %xcc, sparc64_realfault_common ! Jump to normal fault handling - mov FAULT_CODE_DTLB, %g4 ! It was read from DTLB - -/* DTLB ** ICACHE line 4: Unused... */ - ba,a,pt %xcc, winfix_trampoline ! 
Call window fixup code - nop - nop - nop - nop - nop - nop - nop - -#undef CREATE_VPTE_OFFSET1 -#undef CREATE_VPTE_OFFSET2 diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S new file mode 100644 index 00000000000..d0f1565cb56 --- /dev/null +++ b/arch/sparc64/kernel/dtlb_miss.S @@ -0,0 +1,39 @@ +/* DTLB ** ICACHE line 1: Context 0 check and TSB load */ + ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer + ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET + srlx %g6, 48, %g5 ! Get context + brz,pn %g5, kvmap_dtlb ! Context 0 processing + nop ! Delay slot (fill me) + ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB entry + nop ! Push branch to next I$ line + cmp %g4, %g6 ! Compare TAG + +/* DTLB ** ICACHE line 2: TSB compare and TLB load */ + bne,pn %xcc, tsb_miss_dtlb ! Miss + mov FAULT_CODE_DTLB, %g3 + stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB + retry ! Trap done + nop + nop + nop + nop + +/* DTLB ** ICACHE line 3: */ + nop + nop + nop + nop + nop + nop + nop + nop + +/* DTLB ** ICACHE line 4: */ + nop + nop + nop + nop + nop + nop + nop + nop diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 0d8eba21111..567dbb765c3 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -99,6 +99,7 @@ etrap_irq: wrpr %g0, ETRAP_PSTATE2, %pstate mov %l6, %g6 #ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g3 ldxa [%g3] ASI_IMMU, %g5 #endif @@ -248,6 +249,7 @@ scetrap: rdpr %pil, %g2 mov %l6, %g6 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] #ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g3 ldxa [%g3] ASI_IMMU, %g5 #endif diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index b49dcd4504b..d00e20693be 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -429,17 +429,6 @@ setup_trap_table: * * %g6 --> current_thread_info() * - * MMU Globals (PSTATE_MG): - * - * %g1 --> TLB_SFSR - * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB | - * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - * ^ 0xfffff80000000000) - * (this %g2 value is used for computing the PAGE_OFFSET kernel - * TLB entries quickly, the virtual address of the fault XOR'd - * with this %g2 value is the PTE to load into the TLB) - * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE - * * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): * * %g6 --> __irq_work[smp_processor_id()] @@ -450,40 +439,6 @@ setup_trap_table: wrpr %o1, PSTATE_AG, %pstate mov %o2, %g6 -#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) -#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - wrpr %o1, PSTATE_MG, %pstate - mov TSB_REG, %g1 - stxa %g0, [%g1] ASI_DMMU - membar #Sync - stxa %g0, [%g1] ASI_IMMU - membar #Sync - mov TLB_SFSR, %g1 - sethi %uhi(KERN_HIGHBITS), %g2 - or %g2, %ulo(KERN_HIGHBITS), %g2 - sllx %g2, 32, %g2 - or %g2, KERN_LOWBITS, %g2 - - BRANCH_IF_ANY_CHEETAH(g3,g7,8f) - ba,pt %xcc, 9f - nop - -8: - sethi %uhi(VPTE_BASE_CHEETAH), %g3 - or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 - ba,pt %xcc, 2f - sllx %g3, 32, %g3 - -9: - sethi %uhi(VPTE_BASE_SPITFIRE), %g3 - or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 - sllx %g3, 32, %g3 - -2: - clr %g7 -#undef KERN_HIGHBITS -#undef KERN_LOWBITS - /* Kill PROM timer */ sethi %hi(0x80000000), %o2 sllx %o2, 32, %o2 @@ -538,6 +493,7 @@ sparc64_boot_end: #include "systbls.S" #include "ktlb.S" +#include "tsb.S" #include "etrap.S" #include "rtrap.S" #include "winfixup.S" diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S deleted file 
mode 100644 index 4951ff8f687..00000000000 --- a/arch/sparc64/kernel/itlb_base.S +++ /dev/null @@ -1,79 +0,0 @@ -/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $ - * itlb_base.S: Front end to ITLB miss replacement strategy. - * This is included directly into the trap table. - * - * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com) - * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz) - */ - -#if PAGE_SHIFT == 13 -/* - * To compute vpte offset, we need to do ((addr >> 13) << 3), - * which can be optimized to (addr >> 10) if bits 10/11/12 can - * be guaranteed to be 0 ... mmu_context.h does guarantee this - * by only using 10 bits in the hwcontext value. - */ -#define CREATE_VPTE_OFFSET1(r1, r2) \ - srax r1, 10, r2 -#define CREATE_VPTE_OFFSET2(r1, r2) nop -#else /* PAGE_SHIFT */ -#define CREATE_VPTE_OFFSET1(r1, r2) \ - srax r1, PAGE_SHIFT, r2 -#define CREATE_VPTE_OFFSET2(r1, r2) \ - sllx r2, 3, r2 -#endif /* PAGE_SHIFT */ - - -/* Ways we can get here: - * - * 1) Nucleus instruction misses from module code. - * 2) All user instruction misses. - * - * All real page faults merge their code paths to the - * sparc64_realfault_common label below. - */ - -/* ITLB ** ICACHE line 1: Quick user TLB misses */ - mov TLB_SFSR, %g1 - ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS - CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset - CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset - ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE -1: brgez,pn %g5, 3f ! Not valid, branch out - sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot - andcc %g5, %g4, %g0 ! Executable? - -/* ITLB ** ICACHE line 2: Real faults */ - be,pn %xcc, 3f ! Nope, branch. - nop ! Delay-slot -2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB - retry ! Trap return -3: rdpr %pstate, %g4 ! Move into alt-globals - wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate - rdpr %tpc, %g5 ! And load faulting VA - mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB - -/* ITLB ** ICACHE line 3: Finish faults */ -sparc64_realfault_common: ! Called by dtlb_miss - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] - ba,pt %xcc, etrap ! Save state -1: rd %pc, %g7 ! ... - call do_sparc64_fault ! Call fault handler - add %sp, PTREGS_OFF, %o0! Compute pt_regs arg - ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state - nop - -/* ITLB ** ICACHE line 4: Window fixups */ -winfix_trampoline: - rdpr %tpc, %g3 ! Prepare winfixup TNPC - or %g3, 0x7c, %g3 ! Compute branch offset - wrpr %g3, %tnpc ! Write it into TNPC - done ! Do it to it - nop - nop - nop - nop - -#undef CREATE_VPTE_OFFSET1 -#undef CREATE_VPTE_OFFSET2 diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S new file mode 100644 index 00000000000..6b6c8fee04b --- /dev/null +++ b/arch/sparc64/kernel/itlb_miss.S @@ -0,0 +1,39 @@ +/* ITLB ** ICACHE line 1: Context 0 check and TSB load */ + ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer + ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET + srlx %g6, 48, %g5 ! Get context + brz,pn %g5, kvmap_itlb ! Context 0 processing + nop ! Delay slot (fill me) + ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB entry + cmp %g4, %g6 ! Compare TAG + sethi %hi(_PAGE_EXEC), %g4 ! Setup exec check + +/* ITLB ** ICACHE line 2: TSB compare and TLB load */ + bne,pn %xcc, tsb_miss_itlb ! Miss + mov FAULT_CODE_ITLB, %g3 + andcc %g5, %g4, %g0 ! Executable? + be,pn %xcc, tsb_do_fault + nop ! Delay slot, fill me + stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB + retry ! 
Trap done + nop + +/* ITLB ** ICACHE line 3: */ + nop + nop + nop + nop + nop + nop + nop + nop + +/* ITLB ** ICACHE line 4: */ + nop + nop + nop + nop + nop + nop + nop + nop diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index d9244d3c9f7..2b5e71b6888 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -4,191 +4,170 @@ * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) -*/ + */ #include #include #include #include #include +#include .text .align 32 -/* - * On a second level vpte miss, check whether the original fault is to the OBP - * range (note that this is only possible for instruction miss, data misses to - * obp range do not use vpte). If so, go back directly to the faulting address. - * This is because we want to read the tpc, otherwise we have no way of knowing - * the 8k aligned faulting address if we are using >8k kernel pagesize. This - * also ensures no vpte range addresses are dropped into tlb while obp is - * executing (see inherit_locked_prom_mappings() rant). - */ -sparc64_vpte_nucleus: - /* Note that kvmap below has verified that the address is - * in the range MODULES_VADDR --> VMALLOC_END already. So - * here we need only check if it is an OBP address or not. - */ + .globl kvmap_itlb +kvmap_itlb: + /* g6: TAG TARGET */ + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_IMMU, %g4 + +kvmap_itlb_nonlinear: + /* Catch kernel NULL pointer calls. */ + sethi %hi(PAGE_SIZE), %g5 + cmp %g4, %g5 + bleu,pn %xcc, kvmap_dtlb_longpath + nop + + KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) + +kvmap_itlb_tsb_miss: sethi %hi(LOW_OBP_ADDRESS), %g5 cmp %g4, %g5 - blu,pn %xcc, kern_vpte + blu,pn %xcc, kvmap_itlb_vmalloc_addr mov 0x1, %g5 sllx %g5, 32, %g5 cmp %g4, %g5 - blu,pn %xcc, vpte_insn_obp + blu,pn %xcc, kvmap_itlb_obp nop - /* These two instructions are patched by paginig_init(). */ -kern_vpte: - sethi %hi(swapper_pgd_zero), %g5 - lduw [%g5 + %lo(swapper_pgd_zero)], %g5 - - /* With kernel PGD in %g5, branch back into dtlb_backend. */ - ba,pt %xcc, sparc64_kpte_continue - andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ - -vpte_noent: - /* Restore previous TAG_ACCESS, %g5 is zero, and we will - * skip over the trap instruction so that the top level - * TLB miss handler will thing this %g5 value is just an - * invalid PTE, thus branching to full fault processing. - */ - mov TLB_SFSR, %g1 - stxa %g4, [%g1 + %g1] ASI_DMMU - done - -vpte_insn_obp: - /* Behave as if we are at TL0. */ - wrpr %g0, 1, %tl - rdpr %tpc, %g4 /* Find original faulting iaddr */ - srlx %g4, 13, %g4 /* Throw out context bits */ - sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ - - /* Restore previous TAG_ACCESS. */ - mov TLB_SFSR, %g1 - stxa %g4, [%g1 + %g1] ASI_IMMU - - sethi %hi(prom_trans), %g5 - or %g5, %lo(prom_trans), %g5 - -1: ldx [%g5 + 0x00], %g6 ! base - brz,a,pn %g6, longpath ! no more entries, fail - mov TLB_SFSR, %g1 ! and restore %g1 - ldx [%g5 + 0x08], %g1 ! len - add %g6, %g1, %g1 ! end - cmp %g6, %g4 - bgu,pt %xcc, 2f - cmp %g4, %g1 - bgeu,pt %xcc, 2f - ldx [%g5 + 0x10], %g1 ! PTE - - /* TLB load, restore %g1, and return from trap. */ - sub %g4, %g6, %g6 - add %g1, %g6, %g5 - mov TLB_SFSR, %g1 - stxa %g5, [%g0] ASI_ITLB_DATA_IN - retry +kvmap_itlb_vmalloc_addr: + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g4) + + /* Load and check PTE. 
*/ + ldxa [%g5] ASI_PHYS_USE_EC, %g5 + brgez,a,pn %g5, kvmap_itlb_longpath + stx %g0, [%g1] -2: ba,pt %xcc, 1b - add %g5, (3 * 8), %g5 ! next entry - -kvmap_do_obp: - sethi %hi(prom_trans), %g5 - or %g5, %lo(prom_trans), %g5 - srlx %g4, 13, %g4 - sllx %g4, 13, %g4 - -1: ldx [%g5 + 0x00], %g6 ! base - brz,a,pn %g6, longpath ! no more entries, fail - mov TLB_SFSR, %g1 ! and restore %g1 - ldx [%g5 + 0x08], %g1 ! len - add %g6, %g1, %g1 ! end - cmp %g6, %g4 - bgu,pt %xcc, 2f - cmp %g4, %g1 - bgeu,pt %xcc, 2f - ldx [%g5 + 0x10], %g1 ! PTE - - /* TLB load, restore %g1, and return from trap. */ - sub %g4, %g6, %g6 - add %g1, %g6, %g5 - mov TLB_SFSR, %g1 - stxa %g5, [%g0] ASI_DTLB_DATA_IN + TSB_WRITE(%g1, %g5, %g6) + + /* fallthrough to TLB load */ + +kvmap_itlb_load: + stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Reload TLB retry -2: ba,pt %xcc, 1b - add %g5, (3 * 8), %g5 ! next entry +kvmap_itlb_longpath: + rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + rdpr %tpc, %g5 + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_ITLB, %g4 + +kvmap_itlb_obp: + OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g4) + + TSB_WRITE(%g1, %g5, %g6) + + ba,pt %xcc, kvmap_itlb_load + nop + +kvmap_dtlb_obp: + OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g4) + + TSB_WRITE(%g1, %g5, %g6) + + ba,pt %xcc, kvmap_dtlb_load + nop -/* - * On a first level data miss, check whether this is to the OBP range (note - * that such accesses can be made by prom, as well as by kernel using - * prom_getproperty on "address"), and if so, do not use vpte access ... - * rather, use information saved during inherit_prom_mappings() using 8k - * pagesize. - */ .align 32 -kvmap: - brgez,pn %g4, kvmap_nonlinear + .globl kvmap_dtlb +kvmap_dtlb: + /* %g6: TAG TARGET */ + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g4 + brgez,pn %g4, kvmap_dtlb_nonlinear nop -#ifdef CONFIG_DEBUG_PAGEALLOC +#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) +#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) + + sethi %uhi(KERN_HIGHBITS), %g2 + or %g2, %ulo(KERN_HIGHBITS), %g2 + sllx %g2, 32, %g2 + or %g2, KERN_LOWBITS, %g2 + +#undef KERN_HIGHBITS +#undef KERN_LOWBITS + .globl kvmap_linear_patch kvmap_linear_patch: -#endif - ba,pt %xcc, kvmap_load + ba,pt %xcc, kvmap_dtlb_load xor %g2, %g4, %g5 -#ifdef CONFIG_DEBUG_PAGEALLOC - sethi %hi(swapper_pg_dir), %g5 - or %g5, %lo(swapper_pg_dir), %g5 - sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 - srlx %g6, 64 - PAGE_SHIFT, %g6 - andn %g6, 0x3, %g6 - lduw [%g5 + %g6], %g5 - brz,pn %g5, longpath - sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 - srlx %g6, 64 - PAGE_SHIFT, %g6 - sllx %g5, 11, %g5 - andn %g6, 0x3, %g6 - lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brz,pn %g5, longpath - sllx %g4, 64 - PMD_SHIFT, %g6 - srlx %g6, 64 - PAGE_SHIFT, %g6 - sllx %g5, 11, %g5 - andn %g6, 0x7, %g6 - ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brz,pn %g5, longpath +kvmap_dtlb_vmalloc_addr: + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) + + TSB_LOCK_TAG(%g1, %g2, %g4) + + /* Load and check PTE. */ + ldxa [%g5] ASI_PHYS_USE_EC, %g5 + brgez,a,pn %g5, kvmap_dtlb_longpath + stx %g0, [%g1] + + TSB_WRITE(%g1, %g5, %g6) + + /* fallthrough to TLB load */ + +kvmap_dtlb_load: + stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB + retry + +kvmap_dtlb_nonlinear: + /* Catch kernel NULL pointer derefs. 
*/ + sethi %hi(PAGE_SIZE), %g5 + cmp %g4, %g5 + bleu,pn %xcc, kvmap_dtlb_longpath nop - ba,a,pt %xcc, kvmap_load -#endif -kvmap_nonlinear: + KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) + +kvmap_dtlb_tsbmiss: sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 - blu,pn %xcc, longpath + blu,pn %xcc, kvmap_dtlb_longpath mov (VMALLOC_END >> 24), %g5 sllx %g5, 24, %g5 cmp %g4, %g5 - bgeu,pn %xcc, longpath + bgeu,pn %xcc, kvmap_dtlb_longpath nop kvmap_check_obp: sethi %hi(LOW_OBP_ADDRESS), %g5 cmp %g4, %g5 - blu,pn %xcc, kvmap_vmalloc_addr + blu,pn %xcc, kvmap_dtlb_vmalloc_addr mov 0x1, %g5 sllx %g5, 32, %g5 cmp %g4, %g5 - blu,pn %xcc, kvmap_do_obp + blu,pn %xcc, kvmap_dtlb_obp nop - -kvmap_vmalloc_addr: - /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ - ldxa [%g3 + %g6] ASI_N, %g5 - brgez,pn %g5, longpath + ba,pt %xcc, kvmap_dtlb_vmalloc_addr nop -kvmap_load: - /* PTE is valid, load into TLB and return from trap. */ - stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB - retry +kvmap_dtlb_longpath: + rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + rdpr %tl, %g4 + cmp %g4, 1 + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g5 + be,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB, %g4 + ba,pt %xcc, winfix_trampoline + nop diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 059b0d02522..2784aab0d3e 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -44,6 +44,7 @@ #include #include #include +#include #include /* #define VERBOSE_SHOWREGS */ @@ -433,30 +434,16 @@ void exit_thread(void) void flush_thread(void) { struct thread_info *t = current_thread_info(); + struct mm_struct *mm; if (t->flags & _TIF_ABI_PENDING) t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); - if (t->task->mm) { - unsigned long pgd_cache = 0UL; - if (test_thread_flag(TIF_32BIT)) { - struct mm_struct *mm = t->task->mm; - pgd_t *pgd0 = &mm->pgd[0]; - pud_t *pud0 = pud_offset(pgd0, 0); + mm = t->task->mm; + if (mm) + tsb_context_switch(__pa(mm->pgd), + mm->context.sparc64_tsb); - if (pud_none(*pud0)) { - pmd_t *page = pmd_alloc_one(mm, 0); - pud_set(pud0, page); - } - pgd_cache = get_pgd_cache(pgd0); - } - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (pgd_cache), - "r" (TSB_REG), - "i" (ASI_DMMU)); - } set_thread_wsaved(0); /* Turn off performance counters if on. 
*/ diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index b80eba0081c..213eb4a9d8a 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -223,10 +223,14 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3 ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 +#ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g6 brnz,a,pn %l3, 1f ldxa [%g6] ASI_IMMU, %g5 -1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 +#endif +1: + ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 1f7ad8a6905..d2d3369e7b5 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -123,6 +123,7 @@ extern void inherit_locked_prom_mappings(int save_p); static inline void cpu_setup_percpu_base(unsigned long cpu_id) { +#error IMMU TSB usage must be fixed __asm__ __volatile__("mov %0, %%g5\n\t" "stxa %0, [%1] %2\n\t" "membar #Sync" @@ -662,8 +663,6 @@ void smp_call_function_client(int irq, struct pt_regs *regs) extern unsigned long xcall_flush_tlb_mm; extern unsigned long xcall_flush_tlb_pending; extern unsigned long xcall_flush_tlb_kernel_range; -extern unsigned long xcall_flush_tlb_all_spitfire; -extern unsigned long xcall_flush_tlb_all_cheetah; extern unsigned long xcall_report_regs; extern unsigned long xcall_receive_signal; @@ -794,15 +793,6 @@ void smp_report_regs(void) smp_cross_call(&xcall_report_regs, 0, 0, 0); } -void smp_flush_tlb_all(void) -{ - if (tlb_type == spitfire) - smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0); - else - smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0); - __flush_tlb_all(); -} - /* We know that the window frames of the user have been flushed * to the stack before we get here because all callers of us * are flush_tlb_*() routines, and these run after flush_cache_*() diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 9478551cb02..782d8c4973e 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -295,39 +295,6 @@ do_unlock: wrpr %g5, %tba mov %o2, %g6 - wrpr %o1, PSTATE_MG, %pstate -#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) -#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - - mov TSB_REG, %g1 - stxa %g0, [%g1] ASI_DMMU - membar #Sync - mov TLB_SFSR, %g1 - sethi %uhi(KERN_HIGHBITS), %g2 - or %g2, %ulo(KERN_HIGHBITS), %g2 - sllx %g2, 32, %g2 - or %g2, KERN_LOWBITS, %g2 - - BRANCH_IF_ANY_CHEETAH(g3,g7,9f) - - ba,pt %xcc, 1f - nop - -9: - sethi %uhi(VPTE_BASE_CHEETAH), %g3 - or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 - ba,pt %xcc, 2f - sllx %g3, 32, %g3 -1: - sethi %uhi(VPTE_BASE_SPITFIRE), %g3 - or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 - sllx %g3, 32, %g3 - -2: - clr %g7 -#undef KERN_HIGHBITS -#undef KERN_LOWBITS - wrpr %o1, 0x0, %pstate ldx [%g6 + TI_TASK], %g4 diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S new file mode 100644 index 00000000000..44b9e6fed09 --- /dev/null +++ b/arch/sparc64/kernel/tsb.S @@ -0,0 +1,169 @@ +/* tsb.S: Sparc64 TSB table handling. + * + * Copyright (C) 2006 David S. 
Miller + */ + +#include + + .text + .align 32 + + /* Invoked from TLB miss handler, we are in the + * MMU global registers and they are setup like + * this: + * + * %g1: TSB entry pointer + * %g2: available temporary + * %g3: FAULT_CODE_{D,I}TLB + * %g4: available temporary + * %g5: available temporary + * %g6: TAG TARGET + * %g7: physical address base of the linux page + * tables for the current address space + */ + .globl tsb_miss_dtlb +tsb_miss_dtlb: + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g4 + ba,pt %xcc, tsb_miss_page_table_walk + nop + + .globl tsb_miss_itlb +tsb_miss_itlb: + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_IMMU, %g4 + ba,pt %xcc, tsb_miss_page_table_walk + nop + +tsb_miss_page_table_walk: + USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) + +tsb_reload: + TSB_LOCK_TAG(%g1, %g2, %g4) + + /* Load and check PTE. */ + ldxa [%g5] ASI_PHYS_USE_EC, %g5 + brgez,a,pn %g5, tsb_do_fault + stx %g0, [%g1] + + TSB_WRITE(%g1, %g5, %g6) + + /* Finally, load TLB and return from trap. */ +tsb_tlb_reload: + cmp %g3, FAULT_CODE_DTLB + bne,pn %xcc, tsb_itlb_load + nop + +tsb_dtlb_load: + stxa %g5, [%g0] ASI_DTLB_DATA_IN + retry + +tsb_itlb_load: + stxa %g5, [%g0] ASI_ITLB_DATA_IN + retry + + /* No valid entry in the page tables, do full fault + * processing. + */ + + .globl tsb_do_fault +tsb_do_fault: + cmp %g3, FAULT_CODE_DTLB + rdpr %pstate, %g5 + bne,pn %xcc, tsb_do_itlb_fault + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + +tsb_do_dtlb_fault: + rdpr %tl, %g4 + cmp %g4, 1 + mov TLB_TAG_ACCESS, %g4 + ldxa [%g4] ASI_DMMU, %g5 + be,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB, %g4 + ba,pt %xcc, winfix_trampoline + nop + +tsb_do_itlb_fault: + rdpr %tpc, %g5 + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_ITLB, %g4 + + .globl sparc64_realfault_common +sparc64_realfault_common: + stb %g4, [%g6 + TI_FAULT_CODE] ! Save fault code + stx %g5, [%g6 + TI_FAULT_ADDR] ! Save fault address + ba,pt %xcc, etrap ! Save trap state +1: rd %pc, %g7 ! ... + call do_sparc64_fault ! Call fault handler + add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg + ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state + nop ! Delay slot (fill me) + + .globl winfix_trampoline +winfix_trampoline: + rdpr %tpc, %g3 ! Prepare winfixup TNPC + or %g3, 0x7c, %g3 ! Compute branch offset + wrpr %g3, %tnpc ! Write it into TNPC + done ! Trap return + + /* Reload MMU related context switch state at + * schedule() time. + * + * %o0: page table physical address + * %o1: TSB address + */ + .globl tsb_context_switch +tsb_context_switch: + wrpr %g0, PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV, %pstate + + /* Set page table base alternate global. */ + mov %o0, %g7 + + /* XXX can this happen? */ + brz,pn %o1, 9f + nop + + /* Lock TSB into D-TLB. */ + sethi %hi(PAGE_SIZE), %o3 + and %o3, %o1, %o3 + sethi %hi(TSBMAP_BASE), %o2 + add %o2, %o3, %o2 + + /* XXX handle PAGE_SIZE != 8K correctly... */ + mov TSB_REG, %g1 + stxa %o2, [%g1] ASI_DMMU + membar #Sync + + stxa %o2, [%g1] ASI_IMMU + membar #Sync + +#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000) +#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L) + sethi %uhi(KERN_HIGHBITS), %g2 + or %g2, %ulo(KERN_HIGHBITS), %g2 + sllx %g2, 32, %g2 + or %g2, KERN_LOWBITS, %g2 +#undef KERN_HIGHBITS +#undef KERN_LOWBITS + + xor %o1, %g2, %o1 + + /* We use entry 61 for this locked entry. 
This is the spitfire + * TLB entry number, and luckily cheetah masks the value with + * 15 ending us up with entry 13 which is what we want in that + * case too. + * + * XXX Interactions with prom_world()... + */ + mov TLB_TAG_ACCESS, %g1 + stxa %o2, [%g1] ASI_DMMU + membar #Sync + mov (61 << 3), %g1 + stxa %o1, [%g1] ASI_DTLB_DATA_ACCESS + membar #Sync + +9: + wrpr %g0, PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE, %pstate + + retl + mov %o2, %o0 diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 8365bc1f81f..56f060c8fbf 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -78,9 +78,9 @@ tl0_vaw: TRAP(do_vaw) tl0_cee: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) tl0_iamiss: -#include "itlb_base.S" +#include "itlb_miss.S" tl0_damiss: -#include "dtlb_base.S" +#include "dtlb_miss.S" tl0_daprot: #include "dtlb_prot.S" tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */ @@ -241,7 +241,7 @@ tl1_cee: membar #Sync tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) tl1_damiss: -#include "dtlb_backend.S" +#include "dtlb_miss.S" tl1_daprot: #include "dtlb_prot.S" tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */ diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 467d13a0d5c..f018aaf4548 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -43,6 +43,9 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; + . = ALIGN(8192); + swapper_tsb = .; + . += 8192; . = ALIGN(8192); __init_begin = .; .init.text : { diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 39160926267..f5d93aa99cb 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -85,6 +85,7 @@ fill_fixup: mov %o7, %g6 ldx [%g6 + TI_TASK], %g4 #ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g1 ldxa [%g1] ASI_IMMU, %g5 #endif @@ -209,6 +210,7 @@ fill_fixup_mna: mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. #ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g1 ldxa [%g1] ASI_IMMU, %g5 #endif @@ -278,11 +280,6 @@ window_mna_from_user_common: ba,pt %xcc, rtrap clr %l6 - /* These are only needed for 64-bit mode processes which - * put their stack pointer into the VPTE area and there - * happens to be a VPTE tlb entry mapped there during - * a spill/fill trap to that stack frame. - */ .globl winfix_dax, fill_fixup_dax, spill_fixup_dax winfix_dax: andn %g3, 0x7f, %g3 @@ -318,6 +315,7 @@ fill_fixup_dax: mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. #ifdef CONFIG_SMP +#error IMMU TSB usage must be fixed mov TSB_REG, %g1 ldxa [%g1] ASI_IMMU, %g5 #endif diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index 9d0960e69f4..e415bf942bc 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile @@ -5,6 +5,6 @@ EXTRA_AFLAGS := -ansi EXTRA_CFLAGS := -Werror -obj-y := ultra.o tlb.o fault.o init.o generic.o +obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e44ee26cee..da068f6b259 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -408,8 +408,7 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error) /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. 
Misses to the LOW_OBP_ADDRESS -> - * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte - * scheme (also, see rant in inherit_locked_prom_mappings()). + * HI_OBP_ADDRESS range are handled in ktlb.S. */ static inline int in_obp_range(unsigned long vaddr) { @@ -539,75 +538,6 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -/* The OBP specifications for sun4u mark 0xfffffffc00000000 and - * upwards as reserved for use by the firmware (I wonder if this - * will be the same on Cheetah...). We use this virtual address - * range for the VPTE table mappings of the nucleus so we need - * to zap them when we enter the PROM. -DaveM - */ -static void __flush_nucleus_vptes(void) -{ - unsigned long prom_reserved_base = 0xfffffffc00000000UL; - int i; - - /* Only DTLB must be checked for VPTE entries. */ - if (tlb_type == spitfire) { - for (i = 0; i < 63; i++) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (((tag & ~(PAGE_MASK)) == 0) && - ((tag & (PAGE_MASK)) >= prom_reserved_base)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - for (i = 0; i < 512; i++) { - unsigned long tag = cheetah_get_dtlb_tag(i, 2); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 2); - } - - if (tlb_type != cheetah_plus) - continue; - - tag = cheetah_get_dtlb_tag(i, 3); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 3); - } - } - } else { - /* Implement me :-) */ - BUG(); - } -} - static int prom_ditlb_set; struct prom_tlb_entry { int tlb_ent; @@ -635,9 +565,6 @@ void prom_world(int enter) : "i" (PSTATE_IE)); if (enter) { - /* Kick out nucleus VPTEs. */ - __flush_nucleus_vptes(); - /* Install PROM world. */ for (i = 0; i < 16; i++) { if (prom_dtlb[i].tlb_ent != -1) { @@ -1039,18 +966,7 @@ out: struct pgtable_cache_struct pgt_quicklists; #endif -/* OK, we have to color these pages. The page tables are accessed - * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S - * code, as well as by PAGE_OFFSET range direct-mapped addresses by - * other parts of the kernel. By coloring, we make sure that the tlbmiss - * fast handlers do not get data from old/garbage dcache lines that - * correspond to an old/stale virtual address (user/kernel) that - * previously mapped the pagetable page while accessing vpte range - * addresses. The idea is that if the vpte color and PAGE_OFFSET range - * color is the same, then when the kernel initializes the pagetable - * using the later address range, accesses with the first address - * range will see the newly initialized data rather than the garbage. - */ +/* XXX We don't need to color these things in the D-cache any longer. 
*/ #ifdef DCACHE_ALIASING_POSSIBLE #define DC_ALIAS_SHIFT 1 #else @@ -1419,6 +1335,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable) kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0))); + flush_tsb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); + /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. */ diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 8b104be4662..78357cc2a0b 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c @@ -25,6 +25,8 @@ void flush_tlb_pending(void) struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); if (mp->tlb_nr) { + flush_tsb_user(mp); + if (CTX_VALID(mp->mm->context)) { #ifdef CONFIG_SMP smp_flush_tlb_pending(mp->mm, mp->tlb_nr, @@ -89,62 +91,3 @@ no_cache_flush: if (nr >= TLB_BATCH_NR) flush_tlb_pending(); } - -void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) -{ - struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); - unsigned long nr = mp->tlb_nr; - long s = start, e = end, vpte_base; - - if (mp->fullmm) - return; - - /* If start is greater than end, that is a real problem. */ - BUG_ON(start > end); - - /* However, straddling the VA space hole is quite normal. */ - s &= PMD_MASK; - e = (e + PMD_SIZE - 1) & PMD_MASK; - - vpte_base = (tlb_type == spitfire ? - VPTE_BASE_SPITFIRE : - VPTE_BASE_CHEETAH); - - if (unlikely(nr != 0 && mm != mp->mm)) { - flush_tlb_pending(); - nr = 0; - } - - if (nr == 0) - mp->mm = mm; - - start = vpte_base + (s >> (PAGE_SHIFT - 3)); - end = vpte_base + (e >> (PAGE_SHIFT - 3)); - - /* If the request straddles the VA space hole, we - * need to swap start and end. The reason this - * occurs is that "vpte_base" is the center of - * the linear page table mapping area. Thus, - * high addresses with the sign bit set map to - * addresses below vpte_base and non-sign bit - * addresses map to addresses above vpte_base. - */ - if (end < start) { - unsigned long tmp = start; - - start = end; - end = tmp; - } - - while (start < end) { - mp->vaddrs[nr] = start; - mp->tlb_nr = ++nr; - if (nr >= TLB_BATCH_NR) { - flush_tlb_pending(); - nr = 0; - } - start += PAGE_SIZE; - } - if (nr) - flush_tlb_pending(); -} diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c new file mode 100644 index 00000000000..15e8af58b1d --- /dev/null +++ b/arch/sparc64/mm/tsb.c @@ -0,0 +1,84 @@ +/* arch/sparc64/mm/tsb.c + * + * Copyright (C) 2006 David S. Miller + */ + +#include +#include +#include +#include +#include + +#define TSB_ENTRY_ALIGNMENT 16 + +struct tsb { + unsigned long tag; + unsigned long pte; +} __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); + +/* We use an 8K TSB for the whole kernel, this allows to + * handle about 4MB of modules and vmalloc mappings without + * incurring many hash conflicts. + */ +#define KERNEL_TSB_SIZE_BYTES 8192 +#define KERNEL_TSB_NENTRIES \ + (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb)) + +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static inline unsigned long tsb_hash(unsigned long vaddr) +{ + vaddr >>= PAGE_SHIFT; + return vaddr & (KERNEL_TSB_NENTRIES - 1); +} + +static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context) +{ + if (context == ~0UL) + return 1; + + return (entry->tag == ((vaddr >> 22) | (context << 48))); +} + +/* TSB flushes need only occur on the processor initiating the address + * space modification, not on each cpu the address space has run on. + * Only the TLB flush needs that treatment. 
+ */ + +void flush_tsb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long v; + + for (v = start; v < end; v += PAGE_SIZE) { + struct tsb *ent = &swapper_tsb[tsb_hash(v)]; + + if (tag_compare(ent, v, 0)) { + ent->tag = 0UL; + membar_storeload_storestore(); + } + } +} + +void flush_tsb_user(struct mmu_gather *mp) +{ + struct mm_struct *mm = mp->mm; + struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb; + unsigned long ctx = ~0UL; + int i; + + if (CTX_VALID(mm->context)) + ctx = CTX_HWBITS(mm->context); + + for (i = 0; i < mp->tlb_nr; i++) { + unsigned long v = mp->vaddrs[i]; + struct tsb *ent; + + v &= ~0x1UL; + + ent = &tsb[tsb_hash(v)]; + if (tag_compare(ent, v, ctx)) { + ent->tag = 0UL; + membar_storeload_storestore(); + } + } +} diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index e4c9151fa11..22791f29552 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -453,64 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop - .data - -errata32_hwbug: - .xword 0 - - .text - - /* These two are not performance critical... */ - .globl xcall_flush_tlb_all_spitfire -xcall_flush_tlb_all_spitfire: - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - - clr %g2 - clr %g3 -1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_DMMU - membar #Sync - stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS - membar #Sync - - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_IMMU - membar #Sync - stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS - membar #Sync - - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: add %g2, 1, %g2 - cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT - ble,pt %icc, 1b - sll %g2, 3, %g3 - flush %g6 - retry - - .globl xcall_flush_tlb_all_cheetah -xcall_flush_tlb_all_cheetah: - mov 0x80, %g2 - stxa %g0, [%g2] ASI_DMMU_DEMAP - stxa %g0, [%g2] ASI_IMMU_DEMAP - retry - /* These just get rescheduled to PIL vectors. */ .globl xcall_call_function xcall_call_function: -- cgit v1.2.3 From 05e28f9de65a38bb0c769080e91b6976e7e1e70c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:13 -0800 Subject: [SPARC64]: No need to D-cache color page tables any longer. Unlike the virtual page tables, the new TSB scheme does not require this ugly hack. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 71 +++++--------------------------------------------- 1 file changed, 6 insertions(+), 65 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index da068f6b259..936ae1a594a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -145,6 +145,10 @@ int bigkernel = 0; #define PGT_CACHE_LOW 25 #define PGT_CACHE_HIGH 50 +#ifndef CONFIG_SMP +struct pgtable_cache_struct pgt_quicklists; +#endif + void check_pgt_cache(void) { preempt_disable(); @@ -152,10 +156,8 @@ void check_pgt_cache(void) do { if (pgd_quicklist) free_pgd_slow(get_pgd_fast()); - if (pte_quicklist[0]) - free_pte_slow(pte_alloc_one_fast(NULL, 0)); - if (pte_quicklist[1]) - free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); + if (pte_quicklist) + free_pte_slow(pte_alloc_one_fast()); } while (pgtable_cache_size > PGT_CACHE_LOW); } preempt_enable(); @@ -962,67 +964,6 @@ out: spin_unlock(&ctx_alloc_lock); } -#ifndef CONFIG_SMP -struct pgtable_cache_struct pgt_quicklists; -#endif - -/* XXX We don't need to color these things in the D-cache any longer. */ -#ifdef DCACHE_ALIASING_POSSIBLE -#define DC_ALIAS_SHIFT 1 -#else -#define DC_ALIAS_SHIFT 0 -#endif -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) -{ - struct page *page; - unsigned long color; - - { - pte_t *ptep = pte_alloc_one_fast(mm, address); - - if (ptep) - return ptep; - } - - color = VPTE_COLOR(address); - page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); - if (page) { - unsigned long *to_free; - unsigned long paddr; - pte_t *pte; - -#ifdef DCACHE_ALIASING_POSSIBLE - set_page_count(page, 1); - ClearPageCompound(page); - - set_page_count((page + 1), 1); - ClearPageCompound(page + 1); -#endif - paddr = (unsigned long) page_address(page); - memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); - - if (!color) { - pte = (pte_t *) paddr; - to_free = (unsigned long *) (paddr + PAGE_SIZE); - } else { - pte = (pte_t *) (paddr + PAGE_SIZE); - to_free = (unsigned long *) paddr; - } - -#ifdef DCACHE_ALIASING_POSSIBLE - /* Now free the other one up, adjust cache size. */ - preempt_disable(); - *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; - pte_quicklist[color ^ 0x1] = to_free; - pgtable_cache_size++; - preempt_enable(); -#endif - - return pte; - } - return NULL; -} - void sparc_ultra_dump_itlb(void) { int slot; -- cgit v1.2.3 From 3c936465249f863f322154ff1aaa628b84ee5750 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:27 -0800 Subject: [SPARC64]: Kill pgtable quicklists and use SLAB. Taking a nod from the powerpc port. With the per-cpu caching of both the page allocator and SLAB, the pgtable quicklist scheme becomes relatively silly and primitive. Signed-off-by: David S. 
---
 arch/sparc64/kernel/sparc64_ksyms.c |  4 ----
 arch/sparc64/mm/init.c              | 32 +++++++++++++++-----------------
 2 files changed, 15 insertions(+), 21 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 3c06bfb92a8..f1f01378d07 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -241,10 +241,6 @@ EXPORT_SYMBOL(verify_compat_iovec);
 #endif
 EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(pte_alloc_one_kernel);
-#ifndef CONFIG_SMP
-EXPORT_SYMBOL(pgt_quicklists);
-#endif
 EXPORT_SYMBOL(put_fs_struct);
 
 /* math-emu wants this */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 936ae1a594a..7c456afaa9a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
-/* XXX Tune this... */
-#define PGT_CACHE_LOW	25
-#define PGT_CACHE_HIGH	50
+kmem_cache_t *pgtable_cache __read_mostly;
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+{
+	clear_page(addr);
+}
 
-void check_pgt_cache(void)
+void pgtable_cache_init(void)
 {
-	preempt_disable();
-	if (pgtable_cache_size > PGT_CACHE_HIGH) {
-		do {
-			if (pgd_quicklist)
-				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist)
-				free_pte_slow(pte_alloc_one_fast());
-		} while (pgtable_cache_size > PGT_CACHE_LOW);
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  SLAB_HWCACHE_ALIGN |
+					  SLAB_MUST_HWCACHE_ALIGN,
+					  zero_ctor,
+					  NULL);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
 	}
-	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -340,7 +339,6 @@ void show_mem(void)
 	       nr_swap_pages << (PAGE_SHIFT-10));
 	printk("%ld pages of RAM\n", num_physpages);
 	printk("%d free pages\n", nr_free_pages());
-	printk("%d pages in page table cache\n",pgtable_cache_size);
 }
 
 void mmu_info(struct seq_file *m)
--
cgit v1.2.3

From 56fb4df6da76c35dca22036174e2d1edef83ff1f Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sun, 26 Feb 2006 23:24:22 -0800
Subject: [SPARC64]: Eliminate all usage of hard-coded trap globals.

UltraSPARC has special sets of global registers which are switched to
for certain trap types.  There is one set for MMU-related traps, one
set for Interrupt Vector processing, and another set (called the
Alternate globals) for all other trap types.

For what seems like forever we've hard-coded the values in some of
these trap registers.  Some examples include:

1) Interrupt Vector global %g6 holds the current processor's interrupt
   work struct where received interrupts are managed for IRQ handler
   dispatch.

2) MMU global %g7 holds the base of the page tables of the currently
   active address space.

3) Alternate global %g6 held the current_thread_info() value.

Such hardcoding has resulted in some serious issues in many areas.
There are some code sequences where having another register available
would help clean up the implementation.  Taking traps such as
cross-calls from the OBP firmware requires some trick code sequences
wherein we have to save away and restore all of the special sets of
global registers when we enter/exit OBP.

We were also using the IMMU TSB register on SMP to hold the per-cpu
area base address, which doesn't work any longer now that we actually
use the TSB facility of the cpu.

The implementation is pretty straightforward.
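The per-cpu trap state that replaces the hard-coded globals can be pictured with a short C sketch. The struct and array shapes below mirror the traps.c hunk further down in this patch; NR_CPUS is given a placeholder value and the accessor is simplified scaffolding, shown only to make the job of the asm macros concrete.

#define NR_CPUS 64                      /* placeholder; really a config option */

struct thread_info;                     /* opaque for this sketch */

/* Mirrors struct trap_per_cpu from arch/sparc64/kernel/traps.c below. */
struct trap_per_cpu {
	struct thread_info *thread;     /* loaded into %g6 by TRAP_LOAD_THREAD_REG */
	unsigned long pgd_paddr;        /* loaded into %g7 by TRAP_LOAD_PGD_PHYS */
};

struct trap_per_cpu trap_block[NR_CPUS];

/* Conceptually what the trap-time macros do once the patched
 * __get_cpu_id() stub has produced the hardware cpu number:
 */
static inline struct thread_info *trap_load_thread(int cpu)
{
	return trap_block[cpu].thread;
}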
One tricky bit is getting the current processor ID, as that is
different on different cpu variants.  We use a stub with a fancy
calling convention which we patch at boot time.  The calling
convention is that the stub is branched to and the (PC - 4) to return
to is in register %g1.  The cpu number is left in %g6.  This stub can
be invoked by using the __GET_CPUID macro.

We use an array of per-cpu trap state to store the current thread and
physical address of the current address space's page tables.  The
TRAP_LOAD_THREAD_REG loads %g6 with the current thread from this
table; it uses __GET_CPUID and also clobbers %g1.

TRAP_LOAD_IRQ_WORK is used by the interrupt vector processing to load
the current processor's IRQ software state into %g6.  It also uses
__GET_CPUID and clobbers %g1.

Finally, TRAP_LOAD_PGD_PHYS loads the physical address base of the
current address space's page tables into %g7; it clobbers %g1 and
uses __GET_CPUID.

Many refinements are possible, as well as some tuning, with this stuff
in place.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/entry.S      | 122 +++++++++++++++++++++++++++++++++------
 arch/sparc64/kernel/etrap.S      |  18 +++---
 arch/sparc64/kernel/head.S       |  20 +------
 arch/sparc64/kernel/irq.c        |  26 +--------
 arch/sparc64/kernel/rtrap.S      |  10 ++--
 arch/sparc64/kernel/setup.c      |   8 +++
 arch/sparc64/kernel/smp.c        |  55 ++++--------------
 arch/sparc64/kernel/trampoline.S |   9 +--
 arch/sparc64/kernel/traps.c      |  19 ++++++
 arch/sparc64/kernel/tsb.S        |  26 +++++++--
 arch/sparc64/kernel/ttable.S     |   2 +-
 arch/sparc64/kernel/winfixup.S   |  24 +++-----
 arch/sparc64/mm/ultra.S          |  10 ++--
 13 files changed, 192 insertions(+), 157 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index a73553ae7e5..906b64ffdb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -50,7 +50,8 @@ do_fpdis:
 	add	%g0, %g0, %g0
 	ba,a,pt	%xcc, rtrap_clr_l6
 
-1:	ldub	[%g6 + TI_FPSAVED], %g5
+1:	TRAP_LOAD_THREAD_REG
+	ldub	[%g6 + TI_FPSAVED], %g5
 	wr	%g0, FPRS_FEF, %fprs
 	andcc	%g5, FPRS_FEF, %g0
 	be,a,pt	%icc, 1f
@@ -189,6 +190,7 @@ fp_other_bounce:
 	.globl	do_fpother_check_fitos
 	.align	32
 do_fpother_check_fitos:
+	TRAP_LOAD_THREAD_REG
 	sethi	%hi(fp_other_bounce - 4), %g7
 	or	%g7, %lo(fp_other_bounce - 4), %g7
@@ -353,8 +355,6 @@ do_fptrap_after_fsr:
  *
  * With this method we can do most of the cross-call tlb/cache
  * flushing very quickly.
- *
- * Current CPU's IRQ worklist table is locked into %g6, don't touch.
  */
 	.text
 	.align	32
@@ -378,6 +378,8 @@ do_ivec:
 	sllx	%g2, %g4, %g2
 	sllx	%g4, 2, %g4
 
+	TRAP_LOAD_IRQ_WORK
+
 	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
 	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
 	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
@@ -488,9 +490,24 @@ setcc:
 	retl
 	 stx	%o1, [%o0 + PT_V9_TSTATE]
 
-	.globl	utrap, utrap_ill
-utrap:	brz,pn	%g1, etrap
+	.globl	utrap_trap
+utrap_trap:		/* %g3=handler,%g4=level */
+	TRAP_LOAD_THREAD_REG
+	ldx	[%g6 + TI_UTRAPS], %g1
+	brnz,pt	%g1, invoke_utrap
 	 nop
+
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	mov	%l4, %o1
+	call	bad_trap
+	 add	%sp, PTREGS_OFF, %o0
+	ba,pt	%xcc, rtrap
+	 clr	%l6
+
+invoke_utrap:
+	sllx	%g3, 3, %g3
+	ldx	[%g1 + %g3], %g1
 	save	%sp, -128, %sp
 	rdpr	%tstate, %l6
 	rdpr	%cwp, %l7
@@ -500,17 +517,6 @@ utrap:	brz,pn	%g1, etrap
 	rdpr	%tnpc, %l7
 	wrpr	%g1, 0, %tnpc
 	done
-utrap_ill:
-	call	bad_trap
-	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 clr	%l6
-
-	/* XXX Here is stuff we still need to write... -DaveM XXX */
-	.globl	netbsd_syscall
-netbsd_syscall:
-	retl
-	 nop
 
 /* We need to carefully read the error status, ACK
  * the errors, prevent recursive traps, and pass the
@@ -1001,7 +1007,7 @@ dcpe_icpe_tl1_common:
  * %g3:		scratch
  * %g4:		AFSR
  * %g5:		AFAR
- * %g6:		current thread ptr
+ * %g6:		unused, will have current thread ptr after etrap
  * %g7:		scratch
  */
 __cheetah_log_error:
@@ -1690,3 +1696,85 @@ __flushw_user:
 	 restore	%g0, %g0, %g0
 2:	retl
 	 nop
+
+	/* Read cpu ID from hardware, return in %g6.
+	 * (callers_pc - 4) is in %g1.  Patched at boot time.
+	 *
+	 * Default is spitfire implementation.
+	 *
+	 * The instruction sequence needs to be 5 instructions
+	 * in order to fit the longest implementation, which is
+	 * currently starfire.
+	 */
+	.align	32
+	.globl	__get_cpu_id
+__get_cpu_id:
+	ldxa	[%g0] ASI_UPA_CONFIG, %g6
+	srlx	%g6, 17, %g6
+	jmpl	%g1 + 0x4, %g0
+	 and	%g6, 0x1f, %g6
+	nop
+
+__get_cpu_id_cheetah_safari:
+	ldxa	[%g0] ASI_SAFARI_CONFIG, %g6
+	srlx	%g6, 17, %g6
+	jmpl	%g1 + 0x4, %g0
+	 and	%g6, 0x3ff, %g6
+	nop
+
+__get_cpu_id_cheetah_jbus:
+	ldxa	[%g0] ASI_JBUS_CONFIG, %g6
+	srlx	%g6, 17, %g6
+	jmpl	%g1 + 0x4, %g0
+	 and	%g6, 0x1f, %g6
+	nop
+
+__get_cpu_id_starfire:
+	sethi	%hi(0x1fff40000d0 >> 9), %g6
+	sllx	%g6, 9, %g6
+	or	%g6, 0xd0, %g6
+	jmpl	%g1 + 0x4, %g0
+	 lduwa	[%g6] ASI_PHYS_BYPASS_EC_E, %g6
+
+	.globl	per_cpu_patch
+per_cpu_patch:
+	sethi	%hi(this_is_starfire), %o0
+	lduw	[%o0 + %lo(this_is_starfire)], %o1
+	sethi	%hi(__get_cpu_id_starfire), %o0
+	brnz,pn	%o1, 10f
+	 or	%o0, %lo(__get_cpu_id_starfire), %o0
+	sethi	%hi(tlb_type), %o0
+	lduw	[%o0 + %lo(tlb_type)], %o1
+	brz,pt	%o1, 11f
+	 nop
+	rdpr	%ver, %o0
+	srlx	%o0, 32, %o0
+	sethi	%hi(0x003e0016), %o1
+	or	%o1, %lo(0x003e0016), %o1
+	cmp	%o0, %o1
+	sethi	%hi(__get_cpu_id_cheetah_jbus), %o0
+	be,pn	%icc, 10f
+	 or	%o0, %lo(__get_cpu_id_cheetah_jbus), %o0
+	sethi	%hi(__get_cpu_id_cheetah_safari), %o0
+	or	%o0, %lo(__get_cpu_id_cheetah_safari), %o0
+10:
+	sethi	%hi(__get_cpu_id), %o1
+	or	%o1, %lo(__get_cpu_id), %o1
+	lduw	[%o0 + 0x00], %o2
+	stw	%o2, [%o1 + 0x00]
+	flush	%o1 + 0x00
+	lduw	[%o0 + 0x04], %o2
+	stw	%o2, [%o1 + 0x04]
+	flush	%o1 + 0x04
+	lduw	[%o0 + 0x08], %o2
+	stw	%o2, [%o1 + 0x08]
+	flush	%o1 + 0x08
+	lduw	[%o0 + 0x0c], %o2
+	stw	%o2, [%o1 + 0x0c]
+	flush	%o1 + 0x0c
+	lduw	[%o0 + 0x10], %o2
+	stw	%o2, [%o1 + 0x10]
+	flush	%o1 + 0x10
+11:
+	retl
+	 nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 567dbb765c3..8b3b6d720ed 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -31,6 +31,7 @@
 	.globl	etrap, etrap_irq, etraptl1
 etrap:		rdpr	%pil, %g2
 etrap_irq:
+	TRAP_LOAD_THREAD_REG
 	rdpr	%tstate, %g1
 	sllx	%g2, 20, %g3
 	andcc	%g1, TSTATE_PRIV, %g0
@@ -98,11 +99,7 @@ etrap_irq:
 	stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
 	wrpr	%g0, ETRAP_PSTATE2, %pstate
 	mov	%l6, %g6
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g3
-	ldxa	[%g3] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g4, %g3)
 	jmpl	%l2 + 0x4, %g0
 	 ldx	[%g6 + TI_TASK], %g4
 
@@ -126,6 +123,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 	 * 0x58	TL4's TT
 	 * 0x60	TL
 	 */
+	TRAP_LOAD_THREAD_REG
 	sub	%sp, ((4 * 8) * 4) + 8, %g2
 	rdpr	%tl, %g1
@@ -179,7 +177,9 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
 	.align	64
 	.globl	scetrap
-scetrap:	rdpr	%pil, %g2
+scetrap:
+	TRAP_LOAD_THREAD_REG
+	rdpr	%pil, %g2
 	rdpr	%tstate, %g1
 	sllx	%g2, 20, %g3
 	andcc	%g1, TSTATE_PRIV, %g0
@@ -248,11 +248,7 @@ scetrap:	rdpr	%pil, %g2
 	stx	%i6, [%sp + PTREGS_OFF + PT_V9_I6]
 	mov	%l6, %g6
 	stx	%i7, [%sp + PTREGS_OFF + PT_V9_I7]
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g3
-	ldxa	[%g3] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g4, %g3)
 	ldx	[%g6 + TI_TASK], %g4
 	done
 
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index d00e20693be..82ce5bced9c 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 
 /* This section from from _start to sparc64_boot_end should fit into
  * 0x0000000000404000 to 0x0000000000408000.
@@ -421,24 +422,6 @@ setup_trap_table:
 	stxa	%g2, [%g1] ASI_DMMU
 	membar	#Sync
 
-	/* The Linux trap handlers expect various trap global registers
-	 * to be setup with some fixed values.  So here we set these
-	 * up very carefully.  These globals are:
-	 *
-	 * Alternate Globals (PSTATE_AG):
-	 *
-	 * %g6			--> current_thread_info()
-	 *
-	 * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()):
-	 *
-	 * %g6			--> __irq_work[smp_processor_id()]
-	 */
-
-	rdpr	%pstate, %o1
-	mov	%g6, %o2
-	wrpr	%o1, PSTATE_AG, %pstate
-	mov	%o2, %g6
-
 	/* Kill PROM timer */
 	sethi	%hi(0x80000000), %o2
 	sllx	%o2, 32, %o2
@@ -457,7 +440,6 @@ setup_trap_table:
 
 2:
 	wrpr	%g0, %g0, %wstate
-	wrpr	%o1, 0x0, %pstate
 
 	call	init_irqwork_curcpu
 	 nop
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f7490ef629b..3e48af2769d 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -848,33 +848,9 @@ static void kill_prom_timer(void)
 
 void init_irqwork_curcpu(void)
 {
-	register struct irq_work_struct *workp asm("o2");
-	register unsigned long tmp asm("o3");
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(*workp));
-
-	/* Make sure we are called with PSTATE_IE disabled. */
-	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
-			     : "=r" (tmp));
-	if (tmp & PSTATE_IE) {
-		prom_printf("BUG: init_irqwork_curcpu() called with "
-			    "PSTATE_IE enabled, bailing.\n");
-		__asm__ __volatile__("mov	%%i7, %0\n\t"
-				     : "=r" (tmp));
-		prom_printf("BUG: Called from %lx\n", tmp);
-		prom_halt();
-	}
-
-	/* Set interrupt globals. */
-	workp = &__irq_work[cpu];
-	__asm__ __volatile__(
-	"rdpr	%%pstate, %0\n\t"
-	"wrpr	%0, %1, %%pstate\n\t"
-	"mov	%2, %%g6\n\t"
-	"wrpr	%0, 0x0, %%pstate\n\t"
-	: "=&r" (tmp)
-	: "i" (PSTATE_IG), "r" (workp));
+	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
 }
 
 /* Only invoked on boot processor. */
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 213eb4a9d8a..5a62ec5d531 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -223,12 +223,10 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 	ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
 	ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
 	ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g6
-	brnz,a,pn	%l3, 1f
-	 ldxa	[%g6] ASI_IMMU, %g5
-#endif
+	brz,pt	%l3, 1f
+	 nop
+	/* Must do this before thread reg is clobbered below. */
+	LOAD_PER_CPU_BASE(%g6, %g7)
 1:
 	ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
 	ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 158bd31e15b..59a70301a6c 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -507,6 +507,11 @@ void __init setup_arch(char **cmdline_p)
 	/* Work out if we are starfire early on */
 	check_if_starfire();
 
+	/* Now we know enough to patch the __get_cpu_id()
+	 * trampoline used by trap code.
+	 */
+	per_cpu_patch();
+
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
@@ -545,6 +550,9 @@ void __init setup_arch(char **cmdline_p)
 	smp_setup_cpu_possible_map();
 
 	paging_init();
+
+	/* Get boot processor trap_block[] setup. */
+	init_cur_cpu_trap();
 }
 
 static int __init set_preferred_console(void)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index d2d3369e7b5..8c245859d21 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include
 
 extern void calibrate_delay(void);
 
@@ -87,10 +88,6 @@ void __init smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 
-	cpu_data(id).pgcache_size = 0;
-	cpu_data(id).pte_cache[0] = NULL;
-	cpu_data(id).pte_cache[1] = NULL;
-	cpu_data(id).pgd_cache = NULL;
 	cpu_data(id).idle_volume = 1;
 
 	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
@@ -121,26 +118,15 @@ static volatile unsigned long callin_flag = 0;
 
 extern void inherit_locked_prom_mappings(int save_p);
 
-static inline void cpu_setup_percpu_base(unsigned long cpu_id)
-{
-#error IMMU TSB usage must be fixed
-	__asm__ __volatile__("mov	%0, %%g5\n\t"
-			     "stxa	%0, [%1] %2\n\t"
-			     "membar	#Sync"
-			     : /* no outputs */
-			     : "r" (__per_cpu_offset(cpu_id)),
-			       "r" (TSB_REG), "i" (ASI_IMMU));
-}
-
 void __init smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
 	inherit_locked_prom_mappings(0);
 
-	__flush_tlb_all();
+	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-	cpu_setup_percpu_base(cpuid);
+	__flush_tlb_all();
 
 	smp_setup_percpu_timer();
 
@@ -1107,12 +1093,15 @@ void __init smp_setup_cpu_possible_map(void)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	if (hard_smp_processor_id() >= NR_CPUS) {
+	int cpu = hard_smp_processor_id();
+
+	if (cpu >= NR_CPUS) {
 		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
 		prom_halt();
 	}
 
-	current_thread_info()->cpu = hard_smp_processor_id();
+	current_thread_info()->cpu = cpu;
+	__local_per_cpu_offset = __per_cpu_offset(cpu);
 
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), phys_cpu_present_map);
@@ -1173,12 +1162,9 @@ void __init setup_per_cpu_areas(void)
 {
 	unsigned long goal, size, i;
 	char *ptr;
-	/* Created by linker magic */
-	extern char __per_cpu_start[], __per_cpu_end[];
 
 	/* Copy section for each CPU (we discard the original) */
-	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-
+	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
 	if (goal < PERCPU_ENOUGH_ROOM)
 		goal = PERCPU_ENOUGH_ROOM;
@@ -1187,31 +1173,10 @@ void __init setup_per_cpu_areas(void)
 	for (size = 1UL; size < goal; size <<= 1UL)
 		__per_cpu_shift++;
 
-	/* Make sure the resulting __per_cpu_base value
-	 * will fit in the 43-bit sign extended IMMU
-	 * TSB register.
-	 */
-	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
-			      (unsigned long) __per_cpu_start);
+	ptr = alloc_bootmem(size * NR_CPUS);
 
 	__per_cpu_base = ptr - __per_cpu_start;
 
-	if ((__per_cpu_shift < PAGE_SHIFT) ||
-	    (__per_cpu_base & ~PAGE_MASK) ||
-	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
-		prom_printf("PER_CPU: Invalid layout, "
-			    "ptr[%p] shift[%lx] base[%lx]\n",
-			    ptr, __per_cpu_shift, __per_cpu_base);
-		prom_halt();
-	}
-
 	for (i = 0; i < NR_CPUS; i++, ptr += size)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-	/* Finally, load in the boot cpu's base value.
-	 * We abuse the IMMU TSB register for trap handler
-	 * entry and exit loading of %g5.  That is why it
-	 * has to be page aligned.
-	 */
-	cpu_setup_percpu_base(hard_smp_processor_id());
 }
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 782d8c4973e..18c333f841e 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -287,21 +287,18 @@ do_unlock:
 	wrpr	%g0, 0, %wstate
 	wrpr	%g0, 0, %tl
 
-	/* Setup the trap globals, then we can resurface. */
-	rdpr	%pstate, %o1
-	mov	%g6, %o2
-	wrpr	%o1, PSTATE_AG, %pstate
+	/* Load TBA, then we can resurface. */
 	sethi	%hi(sparc64_ttable_tl0), %g5
 	wrpr	%g5, %tba
-	mov	%o2, %g6
-	wrpr	%o1, 0x0, %pstate
 
 	ldx	[%g6 + TI_TASK], %g4
 
 	wrpr	%g0, 0, %wstate
 
 	call	init_irqwork_curcpu
 	 nop
+	call	init_cur_cpu_trap
+	 nop
 
 	/* Start using proper page size encodings in ctx register. */
 	sethi	%hi(sparc64_kern_pri_context), %g3
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 8d44ae5a15e..f47f4874253 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2130,7 +2130,22 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+struct trap_per_cpu trap_block[NR_CPUS];
+
+/* This can get invoked before sched_init() so play it super safe
+ * and use hard_smp_processor_id().
+ */
+void init_cur_cpu_trap(void)
+{
+	int cpu = hard_smp_processor_id();
+	struct trap_per_cpu *p = &trap_block[cpu];
+
+	p->thread = current_thread_info();
+	p->pgd_paddr = 0;
+}
+
 extern void thread_info_offsets_are_bolixed_dave(void);
+extern void trap_per_cpu_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2165,6 +2180,10 @@ void __init trap_init(void)
 	    (TI_FPREGS & (64 - 1)))
 		thread_info_offsets_are_bolixed_dave();
 
+	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
+	    TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr))
+		trap_per_cpu_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 44b9e6fed09..50752c51877 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -36,6 +36,15 @@ tsb_miss_itlb:
 	 nop
 
 tsb_miss_page_table_walk:
+	/* This clobbers %g1 and %g6, preserve them... */
+	mov	%g1, %g5
+	mov	%g6, %g2
+
+	TRAP_LOAD_PGD_PHYS
+
+	mov	%g2, %g6
+	mov	%g5, %g1
+
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 tsb_reload:
@@ -112,15 +121,20 @@ winfix_trampoline:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
 	 * %o1: TSB address
 	 */
+	.align	32
 	.globl	tsb_context_switch
 tsb_context_switch:
-	wrpr	%g0, PSTATE_MG | PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV, %pstate
+	rdpr	%pstate, %o5
+	wrpr	%o5, PSTATE_IE, %pstate
 
-	/* Set page table base alternate global. */
-	mov	%o0, %g7
+	ldub	[%g6 + TI_CPU], %o3
+	sethi	%hi(trap_block), %o4
+	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
+	or	%o4, %lo(trap_block), %o4
+	add	%o4, %o3, %o4
+	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
 
-	/* XXX can this happen? */
-	brz,pn	%o1, 9f
+	brgez	%o1, 9f
 	 nop
 
 	/* Lock TSB into D-TLB. */
 	sethi	%hi(PAGE_SIZE), %o3
 	and	%o3, %o1, %o3
 	sethi	%hi(TSBMAP_BASE), %o2
 	add	%o2, %o3, %o2
@@ -163,7 +177,7 @@ tsb_context_switch:
 	 membar	#Sync
 
 9:
-	wrpr	%g0, PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE, %pstate
+	wrpr	%o5, %pstate
 
 	retl
 	 mov	%o2, %o0
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 56f060c8fbf..2fb7a33993c 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -128,7 +128,7 @@ tl0_flushw:	FLUSH_WINDOW_TRAP
 tl0_resv104:	BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
 	.globl	tl0_solaris
 tl0_solaris:	SOLARIS_SYSCALL_TRAP
-tl0_netbsd:	NETBSD_SYSCALL_TRAP
+tl0_resv109:	BTRAP(0x109)
 tl0_resv10a:	BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
 tl0_resv10f:	BTRAP(0x10f)
 tl0_linux32:	LINUX_32BIT_SYSCALL_TRAP
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index f5d93aa99cb..de588036df4 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -39,6 +39,7 @@ set_pcontext:
  */
 	.globl	fill_fixup, spill_fixup
 fill_fixup:
+	TRAP_LOAD_THREAD_REG
 	rdpr	%tstate, %g1
 	andcc	%g1, TSTATE_PRIV, %g0
 	or	%g4, FAULT_CODE_WINFIXUP, %g4
@@ -84,11 +85,7 @@ fill_fixup:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6
 	ldx	[%g6 + TI_TASK], %g4
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g1
-	ldxa	[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 
 	/* This is the same as below, except we handle this a bit special
 	 * since we must preserve %l5 and %l6, see comment above.
@@ -107,6 +104,7 @@ fill_fixup:
 	 * do not touch %g7 or %g2 so we handle the two cases fine.
 	 */
 spill_fixup:
+	TRAP_LOAD_THREAD_REG
 	ldx	[%g6 + TI_FLAGS], %g1
 	andcc	%g1, _TIF_32BIT, %g0
 	ldub	[%g6 + TI_WSAVED], %g1
@@ -182,6 +180,7 @@ winfix_mna:
 	wrpr	%g3, %tnpc
 	done
 fill_fixup_mna:
+	TRAP_LOAD_THREAD_REG
 	rdpr	%tstate, %g1
 	andcc	%g1, TSTATE_PRIV, %g0
 	be,pt	%xcc, window_mna_from_user_common
@@ -209,17 +208,14 @@ fill_fixup_mna:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6			! Get current back.
 	ldx	[%g6 + TI_TASK], %g4		! Finish it.
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g1
-	ldxa	[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 	call	mem_address_unaligned
 	 add	%sp, PTREGS_OFF, %o0
 
 	b,pt	%xcc, rtrap
 	 nop					! yes, the nop is correct
spill_fixup_mna:
+	TRAP_LOAD_THREAD_REG
 	ldx	[%g6 + TI_FLAGS], %g1
 	andcc	%g1, _TIF_32BIT, %g0
 	ldub	[%g6 + TI_WSAVED], %g1
@@ -287,6 +283,7 @@ winfix_dax:
 	wrpr	%g3, %tnpc
 	done
 fill_fixup_dax:
+	TRAP_LOAD_THREAD_REG
 	rdpr	%tstate, %g1
 	andcc	%g1, TSTATE_PRIV, %g0
 	be,pt	%xcc, window_dax_from_user_common
@@ -314,17 +311,14 @@ fill_fixup_dax:
 	wrpr	%l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
 	mov	%o7, %g6			! Get current back.
 	ldx	[%g6 + TI_TASK], %g4		! Finish it.
-#ifdef CONFIG_SMP
-#error IMMU TSB usage must be fixed
-	mov	TSB_REG, %g1
-	ldxa	[%g1] ASI_IMMU, %g5
-#endif
+	LOAD_PER_CPU_BASE(%g1, %g2)
 	call	spitfire_data_access_exception
 	 add	%sp, PTREGS_OFF, %o0
 
 	b,pt	%xcc, rtrap
 	 nop					! yes, the nop is correct
spill_fixup_dax:
+	TRAP_LOAD_THREAD_REG
 	ldx	[%g6 + TI_FLAGS], %g1
 	andcc	%g1, _TIF_32BIT, %g0
 	ldub	[%g6 + TI_WSAVED], %g1
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 22791f29552..a87394824ec 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -295,12 +295,10 @@ cheetah_patch_cachetlbops:
  *	%g1	address arg 1 (tlb page and range flushes)
  *	%g7	address arg 2 (tlb range flush only)
  *
- *	%g6	ivector table, don't touch
- *	%g2	scratch 1
- *	%g3	scratch 2
- *	%g4	scratch 3
- *
- * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
+ *	%g6	scratch 1
+ *	%g2	scratch 2
+ *	%g3	scratch 3
+ *	%g4	scratch 4
  */
 	.align	32
 	.globl	xcall_flush_tlb_mm
--
cgit v1.2.3

From 09f94287f7260e03bbeab497e743691fafcc22c3 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:31:06 -0800
Subject: [SPARC64]: TSB refinements.

Move {init_new,destroy}_context() out of line.

Do not put huge pages into the TSB, only base page size translations.
There are some clever things we could do here, but for now let's be
correct instead of fancy.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/tsb.S | 11 +++++++++++
 arch/sparc64/mm/tsb.c     | 28 ++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 50752c51877..76f2c0b01f3 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -55,6 +55,17 @@ tsb_reload:
 	brgez,a,pn	%g5, tsb_do_fault
 	 stx	%g0, [%g1]
 
+	/* If it is larger than the base page size, don't
+	 * bother putting it into the TSB.
+	 */
+	srlx	%g5, 32, %g2
+	sethi	%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+	sethi	%hi(_PAGE_SZBITS >> 32), %g7
+	and	%g2, %g4, %g2
+	cmp	%g2, %g7
+	bne,a,pn	%xcc, tsb_tlb_reload
+	 stx	%g0, [%g1]
+
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap. */
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 15e8af58b1d..2f84cef6c1b 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 
 #define TSB_ENTRY_ALIGNMENT	16
 
@@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp)
 		}
 	}
 }
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+	mm->context.sparc64_ctx_val = 0UL;
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	mm->context.sparc64_tsb = (unsigned long *) page;
+
+	return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	free_page((unsigned long) mm->context.sparc64_tsb);
+
+	spin_lock(&ctx_alloc_lock);
+
+	if (CTX_VALID(mm->context)) {
+		unsigned long nr = CTX_NRBITS(mm->context);
+		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+	}
+
+	spin_unlock(&ctx_alloc_lock);
+}
--
cgit v1.2.3

From 98c5584cfc47932c4f3ccf5eee2e0bae1447b85e Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:31:20 -0800
Subject: [SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler routine is
now __tsb_context_switch() and the former is an inline function that
picks out the bits from the mm_struct and passes it into the assembler
code as arguments.

setup_tsb_params() computes the locked TLB entry to map the TSB.
Later when we support using the physical address quad load
instructions of Cheetah+ and later, we'll simply use the physical
address for the TSB register value and set the map virtual and PTE
both to zero.

Signed-off-by: David S. Miller
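The inline wrapper the message describes lives in a header that is not part of this diff; based on the %o0-%o3 argument comments in the tsb.S hunk below and the mm->context fields added in mm/tsb.c, it plausibly reduces to a four-argument call. The sketch below mocks the kernel types so it compiles standalone; any name not visible in the diffs is an assumption.

#include <stdio.h>

/* Mocked-up stand-ins for the kernel's mm_struct / mm_context_t. */
struct mm_context { unsigned long tsb_reg_val, tsb_map_vaddr, tsb_map_pte; };
struct mm_struct  { unsigned long pgd_paddr; struct mm_context context; };

/* Stub for the assembler entry point; the real one writes the MMU TSB
 * register and (re)locks the TSB mapping into the D-TLB.
 */
static void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
                                 unsigned long map_vaddr, unsigned long map_pte)
{
	printf("pgd=%#lx tsb_reg=%#lx map=%#lx pte=%#lx\n",
	       pgd_pa, tsb_reg, map_vaddr, map_pte);
}

/* The inline wrapper, as best as can be inferred from the %o0-%o3
 * comments in the tsb.S hunk below:
 */
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(mm->pgd_paddr,
			     mm->context.tsb_reg_val,
			     mm->context.tsb_map_vaddr,
			     mm->context.tsb_map_pte);
}

int main(void)
{
	struct mm_struct mm = { 0x40000000UL, { 0x3UL, 0, 0 } };
	tsb_context_switch(&mm);
	return 0;
}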
---
 arch/sparc64/kernel/binfmt_aout32.c |   3 +-
 arch/sparc64/kernel/process.c       |   3 +-
 arch/sparc64/kernel/tsb.S           |  55 +++++++-----------
 arch/sparc64/mm/tsb.c               | 109 +++++++++++++++++++++++++++++++-----
 4 files changed, 118 insertions(+), 52 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index a57d7f2b6f1..181c8cdf954 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -330,8 +330,7 @@ beyond_if:
 	current->mm->start_stack =
 		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
-	tsb_context_switch(__pa(current->mm->pgd),
-			   current->mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 
 	start_thread32(regs, ex.a_entry, current->mm->start_stack);
 	if (current->ptrace & PT_PTRACED)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 2784aab0d3e..26548fc604b 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -441,8 +441,7 @@ void flush_thread(void)
 	mm = t->task->mm;
 	if (mm)
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 
 	set_thread_wsaved(0);
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 76f2c0b01f3..fe266bad0a2 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -130,48 +130,36 @@ winfix_trampoline:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1: TSB address
+	 * %o1: TSB register value
+	 * %o2: TSB virtual address
+	 * %o3: TSB mapping locked PTE
+	 *
+	 * We have to run this whole thing with interrupts
+	 * disabled so that the current cpu doesn't change
+	 * due to preemption.
 	 */
 	.align	32
-	.globl	tsb_context_switch
-tsb_context_switch:
+	.globl	__tsb_context_switch
+__tsb_context_switch:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 
-	ldub	[%g6 + TI_CPU], %o3
-	sethi	%hi(trap_block), %o4
-	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
-	or	%o4, %lo(trap_block), %o4
-	add	%o4, %o3, %o4
-	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
-
-	brgez	%o1, 9f
-	 nop
-
-	/* Lock TSB into D-TLB. */
-	sethi	%hi(PAGE_SIZE), %o3
-	and	%o3, %o1, %o3
-	sethi	%hi(TSBMAP_BASE), %o2
-	add	%o2, %o3, %o2
+	ldub	[%g6 + TI_CPU], %g1
+	sethi	%hi(trap_block), %g2
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
+	or	%g2, %lo(trap_block), %g2
+	add	%g2, %g1, %g2
+	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	/* XXX handle PAGE_SIZE != 8K correctly... */
 	mov	TSB_REG, %g1
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o1, [%g1] ASI_DMMU
 	membar	#Sync
-	stxa	%o2, [%g1] ASI_IMMU
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
-	sethi	%uhi(KERN_HIGHBITS), %g2
-	or	%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx	%g2, 32, %g2
-	or	%g2, KERN_LOWBITS, %g2
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
-
-	xor	%o1, %g2, %o1
+	brz	%o2, 9f
+	 nop
 
 	/* We use entry 61 for this locked entry.  This is the spitfire
 	 * TLB entry number, and luckily cheetah masks the value with
 	 * 15 ending us up with entry 13 which is what we want in that
 	 * case too.
 	 *
 	 * XXX Interactions with prom_world()...
 	 */
 	mov	TLB_TAG_ACCESS, %g1
 	stxa	%o2, [%g1] ASI_DMMU
 	membar	#Sync
 	mov	(61 << 3), %g1
-	stxa	%o1, [%g1] ASI_DTLB_DATA_ACCESS
+	stxa	%o3, [%g1] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
-
 9:
 	wrpr	%o5, %pstate
 
 	retl
-	 mov	%o2, %o0
+	 nop
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 2f84cef6c1b..dfe7144fcdf 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -9,13 +9,7 @@
 #include
 #include
 #include
-
-#define TSB_ENTRY_ALIGNMENT	16
-
-struct tsb {
-	unsigned long tag;
-	unsigned long pte;
-} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+#include
 
 /* We use an 8K TSB for the whole kernel, this allows to
  * handle about 4MB of modules and vmalloc mappings without
@@ -27,10 +21,10 @@ struct tsb {
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
 {
 	vaddr >>= PAGE_SHIFT;
-	return vaddr & (KERNEL_TSB_NENTRIES - 1);
+	return vaddr & (nentries - 1);
 }
 
 static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
@@ -51,7 +45,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		struct tsb *ent = &swapper_tsb[tsb_hash(v)];
+		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent, v, 0)) {
 			ent->tag = 0UL;
@@ -63,8 +58,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 void flush_tsb_user(struct mmu_gather *mp)
 {
 	struct mm_struct *mm = mp->mm;
-	struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
+	struct tsb *tsb = mm->context.tsb;
 	unsigned long ctx = ~0UL;
+	unsigned long nentries = mm->context.tsb_nentries;
 	int i;
 
 	if (CTX_VALID(mm->context))
@@ -76,7 +72,7 @@ void flush_tsb_user(struct mmu_gather *mp)
 
 		v &= ~0x1UL;
 
-		ent = &tsb[tsb_hash(v)];
+		ent = &tsb[tsb_hash(v, nentries)];
 		if (tag_compare(ent, v, ctx)) {
 			ent->tag = 0UL;
 			membar_storeload_storestore();
@@ -84,6 +80,83 @@ void flush_tsb_user(struct mmu_gather *mp)
 	}
 }
 
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+{
+	unsigned long tsb_reg, base, tsb_paddr;
+	unsigned long page_sz, tte;
+
+	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+
+	base = TSBMAP_BASE;
+	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
+	       _PAGE_CV | _PAGE_P | _PAGE_W);
+	tsb_paddr = __pa(mm->context.tsb);
+
+	/* Use the smallest page size that can map the whole TSB
+	 * in one TLB entry.
+	 */
+	switch (tsb_bytes) {
+	case 8192 << 0:
+		tsb_reg = 0x0UL;
+#ifdef DCACHE_ALIASING_POSSIBLE
+		base += (tsb_paddr & 8192);
+#endif
+		tte |= _PAGE_SZ8K;
+		page_sz = 8192;
+		break;
+
+	case 8192 << 1:
+		tsb_reg = 0x1UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 2:
+		tsb_reg = 0x2UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 3:
+		tsb_reg = 0x3UL;
+		tte |= _PAGE_SZ64K;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 4:
+		tsb_reg = 0x4UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 5:
+		tsb_reg = 0x5UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 6:
+		tsb_reg = 0x6UL;
+		tte |= _PAGE_SZ512K;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 7:
+		tsb_reg = 0x7UL;
+		tte |= _PAGE_SZ4MB;
+		page_sz = 4 * 1024 * 1024;
+		break;
+	};
+
+	tsb_reg |= base;
+	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+	tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+	mm->context.tsb_reg_val = tsb_reg;
+	mm->context.tsb_map_vaddr = base;
+	mm->context.tsb_map_pte = tte;
+}
+
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long page = get_zeroed_page(GFP_KERNEL);
@@ -92,14 +165,22 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	mm->context.sparc64_tsb = (unsigned long *) page;
+	mm->context.tsb = (struct tsb *) page;
+	setup_tsb_params(mm, PAGE_SIZE);
 
 	return 0;
 }
 
 void destroy_context(struct mm_struct *mm)
 {
-	free_page((unsigned long) mm->context.sparc64_tsb);
+	free_page((unsigned long) mm->context.tsb);
+
+	/* We can remove these later, but for now it's useful
+	 * to catch any bogus post-destroy_context() references
+	 * to the TSB.
+	 */
+	mm->context.tsb = NULL;
+	mm->context.tsb_reg_val = 0UL;
 
 	spin_lock(&ctx_alloc_lock);
--
cgit v1.2.3

From bd40791e1d289d807b8580abe1f117e9c62894e4 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:31:38 -0800
Subject: [SPARC64]: Dynamically grow TSB in response to RSS growth.

As the RSS grows, grow the TSB in order to reduce the likelihood
of hash collisions and thus poor hit rates in the TSB.

This definitely needs some serious tuning.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/smp.c |  28 +++++++--
 arch/sparc64/mm/init.c    |   7 +++
 arch/sparc64/mm/tsb.c     | 151 ++++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 176 insertions(+), 10 deletions(-)
(limited to 'arch')
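The growth policy itself can be read out of the mm/tsb.c hunk that follows: pick the smallest power-of-two TSB, from 8K up to 1MB, whose entry count at three-quarters occupancy still exceeds the RSS. A standalone sketch of just that sizing loop, using the 16-byte entry size from the earlier struct tsb definition:

#include <stdio.h>

#define TSB_ENTRY_SIZE 16UL             /* sizeof(struct tsb): tag + pte */
#define PAGE_SIZE      8192UL
#define MAX_TSB_BYTES  (1024UL * 1024UL)

/* Mirrors the sizing loop in tsb_grow() below. */
static unsigned long tsb_size_for_rss(unsigned long rss)
{
	unsigned long size;

	for (size = PAGE_SIZE; size < MAX_TSB_BYTES; size <<= 1) {
		unsigned long n_entries = (size / TSB_ENTRY_SIZE) * 3 / 4;
		if (n_entries > rss)
			break;
	}
	return size;
}

int main(void)
{
	unsigned long rss;

	for (rss = 100; rss <= 100000; rss *= 10)
		printf("rss=%6lu -> TSB %7lu bytes\n", rss, tsb_size_for_rss(rss));
	return 0;
}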
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8c245859d21..3c14b549cf9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -581,11 +581,11 @@ extern unsigned long xcall_call_function;
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+static int smp_call_function_mask(void (*func)(void *info), void *info,
+				  int nonatomic, int wait, cpumask_t mask)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus() - 1;
+	int cpus = cpus_weight(mask) - 1;
 	long timeout;
 
 	if (!cpus)
@@ -603,7 +603,7 @@ int smp_call_function(void (*func)(void *info), void *info,
 
 	call_data = &data;
 
-	smp_cross_call(&xcall_call_function, 0, 0, 0);
+	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
 
 	/*
 	 * Wait for other cpus to complete function or at
@@ -629,6 +629,13 @@ out_timeout:
 	return 0;
 }
 
+int smp_call_function(void (*func)(void *info), void *info,
+		      int nonatomic, int wait)
+{
+	return smp_call_function_mask(func, info, nonatomic, wait,
+				      cpu_online_map);
+}
+
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	void (*func) (void *info) = call_data->func;
@@ -646,6 +653,19 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 	}
 }
 
+static void tsb_sync(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->active_mm == mm)
+		tsb_context_switch(mm);
+}
+
+void smp_tsb_sync(struct mm_struct *mm)
+{
+	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+}
+
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 7c456afaa9a..a8119cb4fa3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -246,9 +246,11 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
+	struct mm_struct *mm;
 	struct page *page;
 	unsigned long pfn;
 	unsigned long pg_flags;
+	unsigned long mm_rss;
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
@@ -270,6 +272,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 		put_cpu();
 	}
+
+	mm = vma->vm_mm;
+	mm_rss = get_mm_rss(mm);
+	if (mm_rss >= mm->context.tsb_rss_limit)
+		tsb_grow(mm, mm_rss, GFP_ATOMIC);
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index dfe7144fcdf..707af4b84a0 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 /* We use an 8K TSB for the whole kernel, this allows to
  * handle about 4MB of modules and vmalloc mappings without
@@ -146,6 +147,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		tte |= _PAGE_SZ4MB;
 		page_sz = 4 * 1024 * 1024;
 		break;
+
+	default:
+		BUG();
 	};
 
 	tsb_reg |= base;
@@ -157,23 +161,158 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	mm->context.tsb_map_pte = tte;
 }
 
+/* The page tables are locked against modifications while this
+ * runs.
+ *
+ * XXX do some prefetching...
+ */
+static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
+		     struct tsb *new_tsb, unsigned long new_size)
+{
+	unsigned long old_nentries = old_size / sizeof(struct tsb);
+	unsigned long new_nentries = new_size / sizeof(struct tsb);
+	unsigned long i;
+
+	for (i = 0; i < old_nentries; i++) {
+		register unsigned long tag asm("o4");
+		register unsigned long pte asm("o5");
+		unsigned long v;
+		unsigned int hash;
+
+		__asm__ __volatile__(
+			"ldda [%2] %3, %0"
+			: "=r" (tag), "=r" (pte)
+			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
+
+		if (!tag || (tag & TSB_TAG_LOCK))
+			continue;
+
+		/* We only put base page size PTEs into the TSB,
+		 * but that might change in the future.  This code
+		 * would need to be changed if we start putting larger
+		 * page size PTEs into there.
+		 */
+		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);
+
+		/* The tag holds bits 22 to 63 of the virtual address
+		 * and the context.  Clear out the context, and shift
+		 * up to make a virtual address.
+		 */
+		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;
+
+		/* The implied bits of the tag (bits 13 to 21) are
+		 * determined by the TSB entry index, so fill that in.
+		 */
+		v |= (i & (512UL - 1UL)) << 13UL;
+
+		hash = tsb_hash(v, new_nentries);
+		new_tsb[hash].tag = tag;
+		new_tsb[hash].pte = pte;
+	}
+}
+
+/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
+ * update_mmu_cache() invokes this routine to try and grow the TSB.
+ * When we reach the maximum TSB size supported, we stick ~0UL into
+ * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * will not trigger any longer.
+ *
+ * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
+ * of two.  The TSB must be aligned to it's size, so f.e. a 512K TSB
+ * must be 512K aligned.
+ *
+ * The idea here is to grow the TSB when the RSS of the process approaches
+ * the number of entries that the current TSB can hold at once.  Currently,
+ * we trigger when the RSS hits 3/4 of the TSB capacity.
+ */
+void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
+{
+	unsigned long max_tsb_size = 1 * 1024 * 1024;
+	unsigned long size, old_size;
+	struct page *page;
+	struct tsb *old_tsb;
+
+	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
+		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
+
+	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
+		unsigned long n_entries = size / sizeof(struct tsb);
+
+		n_entries = (n_entries * 3) / 4;
+		if (n_entries > rss)
+			break;
+	}
+
+	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
+	if (unlikely(!page))
+		return;
+
+	if (size == max_tsb_size)
+		mm->context.tsb_rss_limit = ~0UL;
+	else
+		mm->context.tsb_rss_limit =
+			((size / sizeof(struct tsb)) * 3) / 4;
+
+	old_tsb = mm->context.tsb;
+	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+
+	if (old_tsb)
+		copy_tsb(old_tsb, old_size, page_address(page), size);
+
+	mm->context.tsb = page_address(page);
+	setup_tsb_params(mm, size);
+
+	/* If old_tsb is NULL, we're being invoked for the first time
+	 * from init_new_context().
+	 */
+	if (old_tsb) {
+		/* Now force all other processors to reload the new
+		 * TSB state.
+		 */
+		smp_tsb_sync(mm);
+
+		/* Finally reload it on the local cpu.  No further
+		 * references will remain to the old TSB and we can
+		 * thus free it up.
+		 */
+		tsb_context_switch(mm);
+
+		free_pages((unsigned long) old_tsb, get_order(old_size));
+	}
+}
+
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long page = get_zeroed_page(GFP_KERNEL);
+	unsigned long initial_rss;
 
 	mm->context.sparc64_ctx_val = 0UL;
 
-	if (unlikely(!page))
-		return -ENOMEM;
-
-	mm->context.tsb = (struct tsb *) page;
-	setup_tsb_params(mm, PAGE_SIZE);
+	/* copy_mm() copies over the parent's mm_struct before calling
+	 * us, so we need to zero out the TSB pointer or else tsb_grow()
+	 * will be confused and think there is an older TSB to free up.
+	 */
+	mm->context.tsb = NULL;
+
+	/* If this is fork, inherit the parent's TSB size.  We would
+	 * grow it to that size on the first page fault anyways.
+	 */
+	initial_rss = mm->context.tsb_nentries;
+	if (initial_rss)
+		initial_rss -= 1;
+
+	tsb_grow(mm, initial_rss, GFP_KERNEL);
+
+	if (unlikely(!mm->context.tsb))
+		return -ENOMEM;
 
 	return 0;
 }
 
 void destroy_context(struct mm_struct *mm)
 {
-	free_page((unsigned long) mm->context.tsb);
+	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
+
+	free_pages((unsigned long) mm->context.tsb, get_order(size));
 
 	/* We can remove these later, but for now it's useful
 	 * to catch any bogus post-destroy_context() references
--
cgit v1.2.3

From b70c0fa1613c4f69b4a340a0e2bee387560ebbb1 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:32:04 -0800
Subject: [SPARC64]: Preload TSB entries from update_mmu_cache().

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/tsb.S | 17 +++++++++++++++++
 arch/sparc64/mm/init.c    | 10 ++++++++++
 2 files changed, 27 insertions(+)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index fe266bad0a2..08405ed6928 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -126,6 +126,23 @@ winfix_trampoline:
 	wrpr	%g3, %tnpc		! Write it into TNPC
 	done				! Trap return
 
+	/* Insert an entry into the TSB.
+	 *
+	 * %o0: TSB entry pointer
+	 * %o1: tag
+	 * %o2: pte
+	 */
+	.align	32
+	.globl	tsb_insert
+tsb_insert:
+	rdpr	%pstate, %o5
+	wrpr	%o5, PSTATE_IE, %pstate
+	TSB_LOCK_TAG(%o0, %g2, %g3)
+	TSB_WRITE(%o0, %o2, %o1)
+	wrpr	%o5, %pstate
+	retl
+	 nop
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a8119cb4fa3..1e8a5a33639 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -277,6 +277,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	mm_rss = get_mm_rss(mm);
 	if (mm_rss >= mm->context.tsb_rss_limit)
 		tsb_grow(mm, mm_rss, GFP_ATOMIC);
+
+	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
+		struct tsb *tsb;
+		unsigned long tag;
+
+		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+				       (mm->context.tsb_nentries - 1UL)];
+		tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
+		tsb_insert(tsb, tag, pte_val(pte));
+	}
 }
 
 void flush_dcache_page(struct page *page)
--
cgit v1.2.3

From 96c6e0d8e2a0eb1338751598be47fa1ffed91704 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:32:29 -0800
Subject: [SPARC64]: Kill {save,restore}_alternate_globals()

No longer needed now that we no longer have hard-coded alternate
global register usage.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/entry.S | 70 ---------------------------------------------
 arch/sparc64/kernel/smp.c   |  7 +----
 2 files changed, 1 insertion(+), 76 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 906b64ffdb1..ad919f4f4b3 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -401,76 +401,6 @@ do_ivec_xcall:
 1:	jmpl	%g3, %g0
 	 nop
 
-	.globl	save_alternate_globals
-save_alternate_globals: /* %o0 = save_area */
-	rdpr	%pstate, %o5
-	andn	%o5, PSTATE_IE, %o1
-	wrpr	%o1, PSTATE_AG, %pstate
-	stx	%g0, [%o0 + 0x00]
-	stx	%g1, [%o0 + 0x08]
-	stx	%g2, [%o0 + 0x10]
-	stx	%g3, [%o0 + 0x18]
-	stx	%g4, [%o0 + 0x20]
-	stx	%g5, [%o0 + 0x28]
-	stx	%g6, [%o0 + 0x30]
-	stx	%g7, [%o0 + 0x38]
-	wrpr	%o1, PSTATE_IG, %pstate
-	stx	%g0, [%o0 + 0x40]
-	stx	%g1, [%o0 + 0x48]
-	stx	%g2, [%o0 + 0x50]
-	stx	%g3, [%o0 + 0x58]
-	stx	%g4, [%o0 + 0x60]
-	stx	%g5, [%o0 + 0x68]
-	stx	%g6, [%o0 + 0x70]
-	stx	%g7, [%o0 + 0x78]
-	wrpr	%o1, PSTATE_MG, %pstate
-	stx	%g0, [%o0 + 0x80]
-	stx	%g1, [%o0 + 0x88]
-	stx	%g2, [%o0 + 0x90]
-	stx	%g3, [%o0 + 0x98]
-	stx	%g4, [%o0 + 0xa0]
-	stx	%g5, [%o0 + 0xa8]
-	stx	%g6, [%o0 + 0xb0]
-	stx	%g7, [%o0 + 0xb8]
-	wrpr	%o5, 0x0, %pstate
-	retl
-	 nop
-
-	.globl	restore_alternate_globals
-restore_alternate_globals: /* %o0 = save_area */
-	rdpr	%pstate, %o5
-	andn	%o5, PSTATE_IE, %o1
-	wrpr	%o1, PSTATE_AG, %pstate
-	ldx	[%o0 + 0x00], %g0
-	ldx	[%o0 + 0x08], %g1
-	ldx	[%o0 + 0x10], %g2
-	ldx	[%o0 + 0x18], %g3
-	ldx	[%o0 + 0x20], %g4
-	ldx	[%o0 + 0x28], %g5
-	ldx	[%o0 + 0x30], %g6
-	ldx	[%o0 + 0x38], %g7
-	wrpr	%o1, PSTATE_IG, %pstate
-	ldx	[%o0 + 0x40], %g0
-	ldx	[%o0 + 0x48], %g1
-	ldx	[%o0 + 0x50], %g2
-	ldx	[%o0 + 0x58], %g3
-	ldx	[%o0 + 0x60], %g4
-	ldx	[%o0 + 0x68], %g5
-	ldx	[%o0 + 0x70], %g6
-	ldx	[%o0 + 0x78], %g7
-	wrpr	%o1, PSTATE_MG, %pstate
-	ldx	[%o0 + 0x80], %g0
-	ldx	[%o0 + 0x88], %g1
-	ldx	[%o0 + 0x90], %g2
-	ldx	[%o0 + 0x98], %g3
-	ldx	[%o0 + 0xa0], %g4
-	ldx	[%o0 + 0xa8], %g5
-	ldx	[%o0 + 0xb0], %g6
-	ldx	[%o0 + 0xb8], %g7
-	wrpr	%o5, 0x0, %pstate
-	retl
-	 nop
-
 	.globl	getcc, setcc
 getcc:	ldx	[%o0 + PT_V9_TSTATE], %o1
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 3c14b549cf9..d030c311cfe 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -940,24 +940,19 @@ void smp_release(void)
  * can service tlb flush xcalls...
  */
 extern void prom_world(int);
-extern void save_alternate_globals(unsigned long *);
-extern void restore_alternate_globals(unsigned long *);
+
 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
-	unsigned long global_save[24];
-
 	clear_softint(1 << irq);
 
 	preempt_disable();
 
 	__asm__ __volatile__("flushw");
-	save_alternate_globals(global_save);
 	prom_world(1);
 
 	atomic_inc(&smp_capture_registry);
 	membar_storeload_storestore();
 
 	while (penguins_are_doing_time)
 		rmb();
 
-	restore_alternate_globals(global_save);
 	atomic_dec(&smp_capture_registry);
 
 	prom_world(0);
--
cgit v1.2.3

From 4753eb2ac7022b999e5e484f1a5dc001dba22bd3 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:32:44 -0800
Subject: [SPARC64]: Fix incorrect TSB lock bit handling.

The TSB_LOCK_BIT define is actually a special value shifted down by
32-bits for the assembler code macros.  In C code, this isn't what
we want.

Signed-off-by: David S. Miller
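A small standalone illustration of the bug being fixed. The bit position 47 and the pre-shifted definition of TSB_TAG_LOCK are assumptions made for the sketch; the diff below only shows that the C side must use (1UL << TSB_TAG_LOCK_BIT).

#include <stdio.h>

#define TSB_TAG_LOCK_BIT 47                              /* assumed position */
#define TSB_TAG_LOCK     (1 << (TSB_TAG_LOCK_BIT - 32))  /* pre-shifted for asm */

int main(void)
{
	unsigned long tag = 1UL << TSB_TAG_LOCK_BIT;     /* a locked TSB tag */

	/* Old test: TSB_TAG_LOCK is the high-word view the asm macros use,
	 * so in C it actually checks bit 15 of the 64-bit tag and misses
	 * the lock entirely.
	 */
	printf("old test sees lock: %d\n", (tag & TSB_TAG_LOCK) != 0);

	/* Fixed test from the diff below: use the real bit position in C. */
	printf("new test sees lock: %d\n", (tag & (1UL << TSB_TAG_LOCK_BIT)) != 0);
	return 0;
}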
---
 arch/sparc64/mm/tsb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 707af4b84a0..e605478217c 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -184,7 +184,7 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
 			: "=r" (tag), "=r" (pte)
 			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));
 
-		if (!tag || (tag & TSB_TAG_LOCK))
+		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
 			continue;
 
 		/* We only put base page size PTEs into the TSB,
--
cgit v1.2.3

From 4da808c352c290d3f762933d44d4ab90c2fd65f3 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:33:00 -0800
Subject: [SPARC64]: Fix bogus flush instruction usage.

Some of the trap code was still assuming that alternate global
%g6 was hard coded with current_thread_info().

Let's just consistently flush at KERNBASE when we need a pipeline
synchronization.  That's locked into the TLB and will always work.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/etrap.S    |  6 ++++--
 arch/sparc64/kernel/rtrap.S    |  3 ++-
 arch/sparc64/kernel/winfixup.S |  3 ++-
 arch/sparc64/lib/clear_page.S  |  4 +++-
 arch/sparc64/mm/ultra.S        | 31 +++++++++++++++++++------------
 5 files changed, 30 insertions(+), 17 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 8b3b6d720ed..db768101729 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -72,7 +72,8 @@ etrap_irq:
 	sethi	%hi(sparc64_kern_pri_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 	stxa	%g3, [%l4] ASI_DMMU
-	flush	%l6
+	sethi	%hi(KERNBASE), %l4
+	flush	%l4
 	wr	%g0, ASI_AIUS, %asi
 2:	wrpr	%g0, 0x0, %tl
 	mov	%g4, %l4
@@ -215,7 +216,8 @@ scetrap:
 	sethi	%hi(sparc64_kern_pri_context), %g2
 	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 	stxa	%g3, [%l4] ASI_DMMU
-	flush	%l6
+	sethi	%hi(KERNBASE), %l4
+	flush	%l4
 	mov	ASI_AIUS, %l7
 2:	mov	%g4, %l4
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 5a62ec5d531..89794ebdcbc 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -259,7 +259,8 @@ rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
 	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
 	or	%l0, %l1, %l0
 	stxa	%l0, [%l7] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %l7
+	flush	%l7
 	rdpr	%wstate, %l1
 	rdpr	%otherwin, %l2
 	srl	%l1, 3, %l1
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index de588036df4..c0545d089c9 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -20,7 +20,8 @@ set_pcontext:
 	ldx	[%l1 + %lo(sparc64_kern_pri_context)], %l1
 	mov	PRIMARY_CONTEXT, %g1
 	stxa	%l1, [%g1] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %l1
+	flush	%l1
 	retl
 	 nop
 
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index b59884ef051..cdc634bceba 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 
 /* What we used to do was lock a TLB entry into a specific
  * TLB slot, clear the page with interrupts disabled, then
@@ -66,7 +67,8 @@ clear_user_page:	/* %o0=dest, %o1=vaddr */
 	wrpr	%o4, PSTATE_IE, %pstate
 	stxa	%o0, [%g3] ASI_DMMU
 	stxa	%g1, [%g0] ASI_DTLB_DATA_IN
-	flush	%g6
+	sethi	%hi(KERNBASE), %g1
+	flush	%g1
 	wrpr	%o4, 0x0, %pstate
 
 	mov	1, %o4
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index a87394824ec..269ed57b3e9 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -36,9 +36,10 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 	mov	0x50, %g3
 	stxa	%g0, [%g3] ASI_DMMU_DEMAP
 	stxa	%g0, [%g3] ASI_IMMU_DEMAP
+	sethi	%hi(KERNBASE), %g3
+	flush	%g3
 	retl
-	 flush	%g6
-	nop
+	 nop
 	nop
 	nop
 	nop
@@ -72,7 +73,8 @@ __flush_tlb_pending:
 	brnz,pt	%o1, 1b
 	 nop
 	stxa	%g2, [%o4] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %o4
+	flush	%o4
 	retl
 	 wrpr	%g7, 0x0, %pstate
 	nop
@@ -94,8 +96,10 @@ __flush_tlb_kernel_range:	/* %o0=start, %o1=end */
 	membar	#Sync
 	brnz,pt	%o3, 1b
 	 sub	%o3, %o4, %o3
-2:	retl
-	 flush	%g6
+2:	sethi	%hi(KERNBASE), %o3
+	flush	%o3
+	retl
+	 nop
 
__spitfire_flush_tlb_mm_slow:
 	rdpr	%pstate, %g1
@@ -105,7 +109,8 @@ __spitfire_flush_tlb_mm_slow:
 	stxa	%g0, [%g3] ASI_IMMU_DEMAP
 	flush	%g6
 	stxa	%g2, [%o1] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %o1
+	flush	%o1
 	retl
 	 wrpr	%g1, 0, %pstate
@@ -181,7 +186,7 @@ __flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
 	.previous
 
 /* Cheetah specific versions, patched at boot time. */
-__cheetah_flush_tlb_mm: /* 18 insns */
+__cheetah_flush_tlb_mm: /* 19 insns */
 	rdpr	%pstate, %g7
 	andn	%g7, PSTATE_IE, %g2
 	wrpr	%g2, 0x0, %pstate
@@ -196,12 +201,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */
 	stxa	%g0, [%g3] ASI_DMMU_DEMAP
 	stxa	%g0, [%g3] ASI_IMMU_DEMAP
 	stxa	%g2, [%o2] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %o2
+	flush	%o2
 	wrpr	%g0, 0, %tl
 	retl
 	 wrpr	%g7, 0x0, %pstate
 
-__cheetah_flush_tlb_pending:	/* 26 insns */
+__cheetah_flush_tlb_pending:	/* 27 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	rdpr	%pstate, %g7
 	sllx	%o1, 3, %o1
@@ -225,7 +231,8 @@ __cheetah_flush_tlb_pending:	/* 26 insns */
 	brnz,pt	%o1, 1b
 	 nop
 	stxa	%g2, [%o4] ASI_DMMU
-	flush	%g6
+	sethi	%hi(KERNBASE), %o4
+	flush	%o4
 	wrpr	%g0, 0, %tl
 	retl
 	 wrpr	%g7, 0x0, %pstate
@@ -265,14 +272,14 @@ cheetah_patch_cachetlbops:
 	sethi	%hi(__cheetah_flush_tlb_mm), %o1
 	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
 	call	cheetah_patch_one
-	 mov	18, %o2
+	 mov	19, %o2
 
 	sethi	%hi(__flush_tlb_pending), %o0
 	or	%o0, %lo(__flush_tlb_pending), %o0
 	sethi	%hi(__cheetah_flush_tlb_pending), %o1
 	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
 	call	cheetah_patch_one
-	 mov	26, %o2
+	 mov	27, %o2
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 	sethi	%hi(__flush_dcache_page), %o0
--
cgit v1.2.3

From 6b6d017235acad3ee1681140795593723bb9b9df Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:33:12 -0800
Subject: [SPARC64]: Use sparc64_highest_unlocked_tlb_ent in __tsb_context_switch()

Instead of ugly hard-coded value.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/tsb.S | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 08405ed6928..c1532292a62 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -185,12 +185,14 @@ __tsb_context_switch:
 	 *
 	 * XXX Interactions with prom_world()...
 	 */
-	mov	TLB_TAG_ACCESS, %g1
-	stxa	%o2, [%g1] ASI_DMMU
-	membar	#Sync
-	mov	(61 << 3), %g1
-	stxa	%o3, [%g1] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
+	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
+	mov	TLB_TAG_ACCESS, %g1
+	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
+	stxa	%o2, [%g1] ASI_DMMU
+	membar	#Sync
+	sllx	%g2, 3, %g2
+	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
 
 9:
 	wrpr	%o5, %pstate
--
cgit v1.2.3

From 3487d1d4414fbfab5d98ec559e6f84f55520cb15 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 31 Jan 2006 18:33:25 -0800
Subject: [SPARC64]: Kill PROM locked TLB entry preservation code.

It is totally unnecessary complexity.  After we take over the
trap table, we handle all PROM tlb misses fully.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/smp.c |   4 -
 arch/sparc64/kernel/tsb.S |   7 --
 arch/sparc64/mm/init.c    | 295 ++--------------------------------------------
 3 files changed, 10 insertions(+), 296 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index d030c311cfe..0e7552546d3 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -116,14 +116,10 @@ static void smp_setup_percpu_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
-extern void inherit_locked_prom_mappings(int save_p);
-
 void __init smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
-	inherit_locked_prom_mappings(0);
-
 	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
 	__flush_tlb_all();
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index c1532292a62..c4e7740d5e8 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -178,13 +178,6 @@ __tsb_context_switch:
 	brz	%o2, 9f
 	 nop
 
-	/* We use entry 61 for this locked entry.  This is the spitfire
-	 * TLB entry number, and luckily cheetah masks the value with
-	 * 15 ending us up with entry 13 which is what we want in that
-	 * case too.
-	 *
-	 * XXX Interactions with prom_world()...
-	 */
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %o4
 	mov	TLB_TAG_ACCESS, %g1
 	lduw	[%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -555,294 +555,12 @@ static void __init inherit_prom_mappings(void)
 	prom_printf("done.\n");
 }
 
-static int prom_ditlb_set;
-struct prom_tlb_entry {
-	int		tlb_ent;
-	unsigned long	tlb_tag;
-	unsigned long	tlb_data;
-};
-struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
-
 void prom_world(int enter)
 {
-	unsigned long pstate;
-	int i;
-
 	if (!enter)
 		set_fs((mm_segment_t) { get_thread_current_ds() });
 
It is totally unnecessary complexity. After we take over the trap table, we handle all PROM tlb misses fully. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 4 - arch/sparc64/kernel/tsb.S | 7 -- arch/sparc64/mm/init.c | 295 ++-------------------------------------------- 3 files changed, 10 insertions(+), 296 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index d030c311cfe..0e7552546d3 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -116,14 +116,10 @@ static void smp_setup_percpu_timer(void); static volatile unsigned long callin_flag = 0; -extern void inherit_locked_prom_mappings(int save_p); - void __init smp_callin(void) { int cpuid = hard_smp_processor_id(); - inherit_locked_prom_mappings(0); - __local_per_cpu_offset = __per_cpu_offset(cpuid); __flush_tlb_all(); diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index c1532292a62..c4e7740d5e8 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -178,13 +178,6 @@ __tsb_context_switch: brz %o2, 9f nop - /* We use entry 61 for this locked entry. This is the spitfire - * TLB entry number, and luckily cheetah masks the value with - * 15 ending us up with entry 13 which is what we want in that - * case too. - * - * XXX Interactions with prom_world()... - */ sethi %hi(sparc64_highest_unlocked_tlb_ent), %o4 mov TLB_TAG_ACCESS, %g1 lduw [%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e8a5a33639..f4d22ccb4cf 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -555,294 +555,12 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -static int prom_ditlb_set; -struct prom_tlb_entry { - int tlb_ent; - unsigned long tlb_tag; - unsigned long tlb_data; -}; -struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; - void prom_world(int enter) { - unsigned long pstate; - int i; - if (!enter) set_fs((mm_segment_t) { get_thread_current_ds() }); - if (!prom_ditlb_set) - return; - - /* Make sure the following runs atomically. */ - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - if (enter) { - /* Install PROM world. 
*/ - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } - } else { - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - else - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); - } - } - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - -void inherit_locked_prom_mappings(int save_p) -{ - int i; - int dtlb_seen = 0; - int itlb_seen = 0; - - /* Fucking losing PROM has more mappings in the TLB, but - * it (conveniently) fails to mention any of these in the - * translations property. The only ones that matter are - * the locked PROM tlb entries, so we impose the following - * irrecovable rule on the PROM, it is allowed 8 locked - * entries in the ITLB and 8 in the DTLB. - * - * Supposedly the upper 16GB of the address space is - * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED - * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface - * used between the client program and the firmware on sun5 - * systems to coordinate mmu mappings is also COMPLETELY - * UNDOCUMENTED!!!!!! Thanks S(t)un! - */ - if (save_p) { - for (i = 0; i < 16; i++) { - prom_itlb[i].tlb_ent = -1; - prom_dtlb[i].tlb_ent = -1; - } - } - if (tlb_type == spitfire) { - int high = sparc64_highest_unlocked_tlb_ent; - for (i = 0; i <= high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_dtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_itlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_itlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - int high = sparc64_highest_unlocked_tlb_ent; - - for (i = 0; i <= high; i++) { - unsigned long data; - - data = cheetah_get_ldtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_ldtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_ldtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - data = cheetah_get_litlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_litlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - cheetah_put_litlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else { - /* Implement me :-) */ - BUG(); - } - if (save_p) - prom_ditlb_set = 1; -} - -/* Give PROM back his world, done during reboots... 
*/ -void prom_reload_locked(void) -{ - int i; - - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } + __asm__ __volatile__("flushw"); } #ifdef DCACHE_ALIASING_POSSIBLE @@ -1066,6 +784,15 @@ void sparc_ultra_dump_dtlb(void) } } +static inline void spitfire_errata32(void) +{ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); +} + extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) @@ -1375,8 +1102,6 @@ void __init paging_init(void) setup_tba(this_is_starfire); } - inherit_locked_prom_mappings(1); - __flush_tlb_all(); /* Setup bootmem... */ -- cgit v1.2.3 From a8b900d801697609d1b56cc9c110148c64678068 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:37 -0800 Subject: [SPARC64]: Kill sole argument passed to setup_tba(). No longer used, and move extern declaration to a header file. Signed-off-by: David S. Miller --- arch/sparc64/kernel/head.S | 2 +- arch/sparc64/mm/init.c | 11 ++--------- 2 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 82ce5bced9c..2988be85147 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -454,7 +454,7 @@ setup_trap_table: restore .globl setup_tba -setup_tba: /* i0 = is_starfire */ +setup_tba: save %sp, -192, %sp /* The boot processor is the only cpu which invokes this diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index f4d22ccb4cf..20e7af552ce 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1092,15 +1092,8 @@ void __init paging_init(void) inherit_prom_mappings(); - /* Ok, we can use our TLB miss and window trap handlers safely. - * We need to do a quick peek here to see if we are on StarFire - * or not, so setup_tba can setup the IRQ globals correctly (it - * needs to get the hard smp processor id correctly). - */ - { - extern void setup_tba(int); - setup_tba(this_is_starfire); - } + /* Ok, we can use our TLB miss and window trap handlers safely. */ + setup_tba(); __flush_tlb_all(); -- cgit v1.2.3 From 2f7ee7c63f08b7f883b710a29d91c1891b81b8e1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:49 -0800 Subject: [SPARC64]: Increase swapper_tsb size to 32K. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/head.S | 12 ++++++------ arch/sparc64/kernel/vmlinux.lds.S | 3 --- arch/sparc64/mm/tsb.c | 8 -------- 3 files changed, 6 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 2988be85147..7840271d7aa 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -484,16 +484,16 @@ sparc64_boot_end: /* * The following skip makes sure the trap table in ttable.S is aligned * on a 32K boundary as required by the v9 specs for TBA register. + * + * We align to a 32K boundary, then we have the 32K kernel TSB, + * then the 32K aligned trap table. */ 1: .skip 0x4000 + _start - 1b -#ifdef CONFIG_SBUS -/* This is just a hack to fool make depend config.h discovering - strategy: As the .S files below need config.h, but - make depend does not find it for them, we include config.h - in head.S */ -#endif + .globl swapper_tsb +swapper_tsb: + .skip (32 * 1024) ! 0x0000000000408000 diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index f018aaf4548..467d13a0d5c 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -43,9 +43,6 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; - . = ALIGN(8192); - swapper_tsb = .; - . += 8192; . = ALIGN(8192); __init_begin = .; .init.text : { diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index e605478217c..1c4e5c2dfc5 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -12,14 +12,6 @@ #include #include -/* We use an 8K TSB for the whole kernel, this allows to - * handle about 4MB of modules and vmalloc mappings without - * incurring many hash conflicts. - */ -#define KERNEL_TSB_SIZE_BYTES 8192 -#define KERNEL_TSB_NENTRIES \ - (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb)) - extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries) -- cgit v1.2.3 From 764afe2edb834930050313459cef9f1ae2656750 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:06 -0800 Subject: [SPARC64]: Kill hard-coded %pstate setting in sparc_exit. Just flip the bit off of whatever it's currently set to. PSTATE_IE is guaranteed to be enabled when we get here. Signed-off-by: David S. Miller --- arch/sparc64/kernel/entry.S | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index ad919f4f4b3..563fa4ec33f 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1475,13 +1475,14 @@ ret_from_syscall: 1: b,pt %xcc, ret_sys_call ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 -sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate +sparc_exit: rdpr %pstate, %g2 + wrpr %g2, PSTATE_IE, %pstate rdpr %otherwin, %g1 rdpr %cansave, %g3 add %g3, %g1, %g3 wrpr %g3, 0x0, %cansave wrpr %g0, 0x0, %otherwin - wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate + wrpr %g2, 0x0, %pstate ba,pt %xcc, sys_exit stb %g0, [%g6 + TI_WSAVED] -- cgit v1.2.3 From 9bc657b28eba22e36efcdf3afa08100f92971eb4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:21 -0800 Subject: [SPARC64]: Fix too early reference to %g6 %g6 is not necessarily set to current_thread_info() at sparc64_realfault_common. So store the fault code and address after we invoke etrap and %g6 is properly set up. Signed-off-by: David S.
Miller --- arch/sparc64/kernel/tsb.S | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index c4e7740d5e8..e1dd37f5e53 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -110,10 +110,13 @@ tsb_do_itlb_fault: .globl sparc64_realfault_common sparc64_realfault_common: - stb %g4, [%g6 + TI_FAULT_CODE] ! Save fault code - stx %g5, [%g6 + TI_FAULT_ADDR] ! Save fault address + /* fault code in %g4, fault address in %g5, etrap will + * preserve these two values in %l4 and %l5 respectively + */ ba,pt %xcc, etrap ! Save trap state 1: rd %pc, %g7 ! ... + stb %l4, [%g6 + TI_FAULT_CODE] ! Save fault code + stx %l5, [%g6 + TI_FAULT_ADDR] ! Save fault address call do_sparc64_fault ! Call fault handler add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state -- cgit v1.2.3 From 9954863975910a1b9372b7d5006a6cba43bdd288 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:34 -0800 Subject: [SPARC64]: Kill swapper_pgd_zero, totally unused. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 20e7af552ce..2c21d85de78 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -388,7 +388,6 @@ struct linux_prom_translation { /* Exported for kernel TLB miss handling in ktlb.S */ struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -unsigned int swapper_pgd_zero __read_mostly; extern unsigned long prom_boot_page; extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); @@ -1088,8 +1087,6 @@ void __init paging_init(void) pud_set(pud_offset(&swapper_pg_dir[0], 0), swapper_low_pmd_dir + (shift / sizeof(pgd_t))); - swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); - inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ -- cgit v1.2.3 From 86b818687d4894063ecd1190e54717a0cce8c009 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:34:51 -0800 Subject: [SPARC64]: Fix race in LOAD_PER_CPU_BASE() Since we use %g5 itself as a temporary, it can get clobbered if we take an interrupt mid-stream and thus end up with the final %g5 value too early as a result of rtrap processing. Set %g5 at the very end, atomically, to avoid this problem. Signed-off-by: David S.
Miller --- arch/sparc64/kernel/etrap.S | 4 ++-- arch/sparc64/kernel/rtrap.S | 2 +- arch/sparc64/kernel/winfixup.S | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index db768101729..d974d18b15b 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -100,7 +100,7 @@ etrap_irq: stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] wrpr %g0, ETRAP_PSTATE2, %pstate mov %l6, %g6 - LOAD_PER_CPU_BASE(%g4, %g3) + LOAD_PER_CPU_BASE(%g4, %g3, %l1) jmpl %l2 + 0x4, %g0 ldx [%g6 + TI_TASK], %g4 @@ -250,7 +250,7 @@ scetrap: stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] mov %l6, %g6 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] - LOAD_PER_CPU_BASE(%g4, %g3) + LOAD_PER_CPU_BASE(%g4, %g3, %l1) ldx [%g6 + TI_TASK], %g4 done diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 89794ebdcbc..64bc03610bc 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -226,7 +226,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 brz,pt %l3, 1f nop /* Must do this before thread reg is clobbered below. */ - LOAD_PER_CPU_BASE(%g6, %g7) + LOAD_PER_CPU_BASE(%i0, %i1, %i2) 1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index c0545d089c9..ade991b7d07 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -86,7 +86,7 @@ fill_fixup: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g1, %g2) + LOAD_PER_CPU_BASE(%g1, %g2, %g3) /* This is the same as below, except we handle this a bit special * since we must preserve %l5 and %l6, see comment above. @@ -209,7 +209,7 @@ fill_fixup_mna: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g1, %g2) + LOAD_PER_CPU_BASE(%g1, %g2, %g3) call mem_address_unaligned add %sp, PTREGS_OFF, %o0 @@ -312,7 +312,7 @@ fill_fixup_dax: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g1, %g2) + LOAD_PER_CPU_BASE(%g1, %g2, %g3) call spitfire_data_access_exception add %sp, PTREGS_OFF, %o0 -- cgit v1.2.3 From 30a6ecad9670d97c9d0fbfa7d80970aeb339bdec Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:35:05 -0800 Subject: [SPARC64]: Don't clobber alt-global %g4 on window fixups. If we are returning to kernel mode, %g4 could be live (for example, in the case where we window spill in the etrap code). So do not change its value if going back to kernel. Signed-off-by: David S. Miller --- arch/sparc64/kernel/winfixup.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index ade991b7d07..320a762d051 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -160,7 +160,7 @@ spill_fixup: andcc %g1, TSTATE_PRIV, %g0 saved and %g1, TSTATE_CWP, %g1 - be,pn %xcc, window_scheisse_from_user_common + be,a,pn %xcc, window_scheisse_from_user_common mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 retry -- cgit v1.2.3 From 517af33237ecfc3c8a93b335365fa61e741ceca4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Feb 2006 15:55:21 -0800 Subject: [SPARC64]: Access TSB with physical addresses when possible.
This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller --- arch/sparc64/kernel/dtlb_miss.S | 2 +- arch/sparc64/kernel/itlb_miss.S | 2 +- arch/sparc64/kernel/ktlb.S | 20 ++++----- arch/sparc64/kernel/tsb.S | 35 ++++++++++++--- arch/sparc64/kernel/vmlinux.lds.S | 4 ++ arch/sparc64/mm/init.c | 32 +++++++++++++ arch/sparc64/mm/tsb.c | 95 ++++++++++++++++++++++++++------------- 7 files changed, 143 insertions(+), 47 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S index d0f1565cb56..2ef6f6e6e72 100644 --- a/arch/sparc64/kernel/dtlb_miss.S +++ b/arch/sparc64/kernel/dtlb_miss.S @@ -4,7 +4,7 @@ srlx %g6, 48, %g5 ! Get context brz,pn %g5, kvmap_dtlb ! Context 0 processing nop ! Delay slot (fill me) - ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB entry + TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry nop ! Push branch to next I$ line cmp %g4, %g6 ! Compare TAG diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S index 6b6c8fee04b..97facce27aa 100644 --- a/arch/sparc64/kernel/itlb_miss.S +++ b/arch/sparc64/kernel/itlb_miss.S @@ -4,7 +4,7 @@ srlx %g6, 48, %g5 ! Get context brz,pn %g5, kvmap_itlb ! Context 0 processing nop ! Delay slot (fill me) - ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 ! Load TSB entry + TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry cmp %g4, %g6 ! Compare TAG sethi %hi(_PAGE_EXEC), %g4 ! Setup exec check diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 2b5e71b6888..9b415ab6db6 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -44,14 +44,14 @@ kvmap_itlb_tsb_miss: kvmap_itlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) - TSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g4) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 brgez,a,pn %g5, kvmap_itlb_longpath - stx %g0, [%g1] + KTSB_STORE(%g1, %g0) - TSB_WRITE(%g1, %g5, %g6) + KTSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -69,9 +69,9 @@ kvmap_itlb_longpath: kvmap_itlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) - TSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g4) - TSB_WRITE(%g1, %g5, %g6) + KTSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_itlb_load nop @@ -79,9 +79,9 @@ kvmap_itlb_obp: kvmap_dtlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) - TSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g4) - TSB_WRITE(%g1, %g5, %g6) + KTSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_dtlb_load nop @@ -114,14 +114,14 @@ kvmap_linear_patch: kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) - TSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g4) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 brgez,a,pn %g5, kvmap_dtlb_longpath - stx %g0, [%g1] + KTSB_STORE(%g1, %g0) - TSB_WRITE(%g1, %g5, %g6) + KTSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index e1dd37f5e53..ff6a79beb98 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -53,7 +53,7 @@ tsb_reload: /* Load and check PTE. 
*/ ldxa [%g5] ASI_PHYS_USE_EC, %g5 brgez,a,pn %g5, tsb_do_fault - stx %g0, [%g1] + TSB_STORE(%g1, %g0) /* If it is larger than the base page size, don't * bother putting it into the TSB. @@ -64,7 +64,7 @@ tsb_reload: and %g2, %g4, %g2 cmp %g2, %g7 bne,a,pn %xcc, tsb_tlb_reload - stx %g0, [%g1] + TSB_STORE(%g1, %g0) TSB_WRITE(%g1, %g5, %g6) @@ -131,13 +131,13 @@ winfix_trampoline: /* Insert an entry into the TSB. * - * %o0: TSB entry pointer + * %o0: TSB entry pointer (virt or phys address) * %o1: tag * %o2: pte */ .align 32 - .globl tsb_insert -tsb_insert: + .globl __tsb_insert +__tsb_insert: rdpr %pstate, %o5 wrpr %o5, PSTATE_IE, %pstate TSB_LOCK_TAG(%o0, %g2, %g3) @@ -146,6 +146,31 @@ tsb_insert: retl nop + /* Flush the given TSB entry if it has the matching + * tag. + * + * %o0: TSB entry pointer (virt or phys address) + * %o1: tag + */ + .align 32 + .globl tsb_flush +tsb_flush: + sethi %hi(TSB_TAG_LOCK_HIGH), %g2 +1: TSB_LOAD_TAG(%o0, %g1) + srlx %g1, 32, %o3 + andcc %o3, %g2, %g0 + bne,pn %icc, 1b + membar #LoadLoad + cmp %g1, %o1 + bne,pt %xcc, 2f + clr %o3 + TSB_CAS_TAG(%o0, %g1, %o3) + cmp %g1, %o3 + bne,pn %xcc, 1b + nop +2: retl + TSB_MEMBAR + /* Reload MMU related context switch state at * schedule() time. * diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 467d13a0d5c..71b943f1c9b 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -70,6 +70,10 @@ SECTIONS .con_initcall.init : { *(.con_initcall.init) } __con_initcall_end = .; SECURITY_INIT + . = ALIGN(4); + __tsb_phys_patch = .; + .tsb_phys_patch : { *(.tsb_phys_patch) } + __tsb_phys_patch_end = .; . = ALIGN(8192); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2c21d85de78..4893f3e2c33 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -39,6 +39,7 @@ #include #include #include +#include extern void device_scan(void); @@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c : "g1", "g7"); } +static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) +{ + unsigned long tsb_addr = (unsigned long) ent; + + if (tlb_type == cheetah_plus) + tsb_addr = __pa(tsb_addr); + + __tsb_insert(tsb_addr, tag, pte); +} + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size) return ~0UL; } +static void __init tsb_phys_patch(void) +{ + struct tsb_phys_patch_entry *p; + + p = &__tsb_phys_patch; + while (p < &__tsb_phys_patch_end) { + unsigned long addr = p->addr; + + *(unsigned int *) addr = p->insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + p++; + } +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1052,6 +1081,9 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + if (tlb_type == cheetah_plus) + tsb_phys_patch(); + /* Find available physical memory... 
*/ read_obp_memory("available", &pavail[0], &pavail_ents); diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 1c4e5c2dfc5..787533f0104 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -20,12 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries return vaddr & (nentries - 1); } -static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context) +static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context) { - if (context == ~0UL) - return 1; - - return (entry->tag == ((vaddr >> 22) | (context << 48))); + return (tag == ((vaddr >> 22) | (context << 48))); } /* TSB flushes need only occur on the processor initiating the address @@ -41,7 +38,7 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); struct tsb *ent = &swapper_tsb[hash]; - if (tag_compare(ent, v, 0)) { + if (tag_compare(ent->tag, v, 0)) { ent->tag = 0UL; membar_storeload_storestore(); } @@ -52,24 +49,31 @@ void flush_tsb_user(struct mmu_gather *mp) { struct mm_struct *mm = mp->mm; struct tsb *tsb = mm->context.tsb; - unsigned long ctx = ~0UL; unsigned long nentries = mm->context.tsb_nentries; + unsigned long ctx, base; int i; - if (CTX_VALID(mm->context)) - ctx = CTX_HWBITS(mm->context); + if (unlikely(!CTX_VALID(mm->context))) + return; + + ctx = CTX_HWBITS(mm->context); + if (tlb_type == cheetah_plus) + base = __pa(tsb); + else + base = (unsigned long) tsb; + for (i = 0; i < mp->tlb_nr; i++) { unsigned long v = mp->vaddrs[i]; - struct tsb *ent; + unsigned long tag, ent, hash; v &= ~0x1UL; - ent = &tsb[tsb_hash(v, nentries)]; - if (tag_compare(ent, v, ctx)) { - ent->tag = 0UL; - membar_storeload_storestore(); - } + hash = tsb_hash(v, nentries); + ent = base + (hash * sizeof(struct tsb)); + tag = (v >> 22UL) | (ctx << 48UL); + + tsb_flush(ent, tag); } } @@ -84,6 +88,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W); tsb_paddr = __pa(mm->context.tsb); + BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); /* Use the smallest page size that can map the whole TSB * in one TLB entry. @@ -144,13 +149,23 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) BUG(); }; - tsb_reg |= base; - tsb_reg |= (tsb_paddr & (page_sz - 1UL)); - tte |= (tsb_paddr & ~(page_sz - 1UL)); + if (tlb_type == cheetah_plus) { + /* Physical mapping, no locked TLB entry for TSB. 
*/ + tsb_reg |= tsb_paddr; + + mm->context.tsb_reg_val = tsb_reg; + mm->context.tsb_map_vaddr = 0; + mm->context.tsb_map_pte = 0; + } else { + tsb_reg |= base; + tsb_reg |= (tsb_paddr & (page_sz - 1UL)); + tte |= (tsb_paddr & ~(page_sz - 1UL)); + + mm->context.tsb_reg_val = tsb_reg; + mm->context.tsb_map_vaddr = base; + mm->context.tsb_map_pte = tte; + } - mm->context.tsb_reg_val = tsb_reg; - mm->context.tsb_map_vaddr = base; - mm->context.tsb_map_pte = tte; } /* The page tables are locked against modifications while this @@ -168,13 +183,21 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, for (i = 0; i < old_nentries; i++) { register unsigned long tag asm("o4"); register unsigned long pte asm("o5"); - unsigned long v; - unsigned int hash; - - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD)); + unsigned long v, hash; + + if (tlb_type == cheetah_plus) { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (__pa(&old_tsb[i])), + "i" (ASI_QUAD_LDD_PHYS)); + } else { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (&old_tsb[i]), + "i" (ASI_NUCLEUS_QUAD_LDD)); + } if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT))) continue; @@ -198,8 +221,20 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, v |= (i & (512UL - 1UL)) << 13UL; hash = tsb_hash(v, new_nentries); - new_tsb[hash].tag = tag; - new_tsb[hash].pte = pte; + if (tlb_type == cheetah_plus) { + __asm__ __volatile__( + "stxa %0, [%1] %2\n\t" + "stxa %3, [%4] %2" + : /* no outputs */ + : "r" (tag), + "r" (__pa(&new_tsb[hash].tag)), + "i" (ASI_PHYS_USE_EC), + "r" (pte), + "r" (__pa(&new_tsb[hash].pte))); + } else { + new_tsb[hash].tag = tag; + new_tsb[hash].pte = pte; + } } } -- cgit v1.2.3 From 7bec08e38a7d0f088994f6eec9b6374652ea71fb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 01:20:18 -0800 Subject: [SPARC64]: Correctable ECC errors cannot occur at trap level > 0. They are disrupting, which by the sparc v9 definition means they can only occur when interrupts are enabled in the %pstate register. This never occurs in any of the trap handling code running at trap levels > 0. So just mark it as an unexpected trap. This allows us to kill off the cee_stuff member of struct thread_info. Signed-off-by: David S.
Miller --- arch/sparc64/kernel/traps.c | 1 - arch/sparc64/kernel/ttable.S | 18 +----------------- 2 files changed, 1 insertion(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index f47f4874253..7e52e897266 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2169,7 +2169,6 @@ void __init trap_init(void) TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || TI_PCR != offsetof(struct thread_info, pcr_reg) || - TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) || TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_NEW_CHILD != offsetof(struct thread_info, new_child) || TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 2fb7a33993c..99531424c59 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -222,23 +222,7 @@ tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f) tl1_ivec: TRAP_IVEC tl1_paw: TRAPTL1(do_paw_tl1) tl1_vaw: TRAPTL1(do_vaw_tl1) - - /* The grotty trick to save %g1 into current->thread.cee_stuff - * is because when we take this trap we could be interrupting - * trap code already using the trap alternate global registers. - * - * We cross our fingers and pray that this store/load does - * not cause yet another CEE trap. - */ -tl1_cee: membar #Sync - stx %g1, [%g6 + TI_CEE_STUFF] - ldxa [%g0] ASI_AFSR, %g1 - membar #Sync - stxa %g1, [%g0] ASI_AFSR - membar #Sync - ldx [%g6 + TI_CEE_STUFF], %g1 - retry - +tl1_cee: BTRAPTL1(0x63) tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67) tl1_damiss: #include "dtlb_miss.S" -- cgit v1.2.3 From f4e841da30b4bcbb8f1cc20a01157a788ff58b21 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 16:16:24 -0800 Subject: [SPARC64]: Turn off TSB growing for now. There are several tricky races involved with growing the TSB. So just use base-size TSBs for user contexts and we can revisit enabling this later. One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel. That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch(). Then there is flush_tsb_user() running in parallel with a tsb_grow(). In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush but that looks really ugly. Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear). It's either copy_tsb() or some bug elsewhere in the TSB assembly. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 5 ----- arch/sparc64/mm/tsb.c | 11 +---------- 2 files changed, 1 insertion(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4893f3e2c33..1af63307b24 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p struct page *page; unsigned long pfn; unsigned long pg_flags; - unsigned long mm_rss; pfn = pte_pfn(pte); if (pfn_valid(pfn) && @@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - mm_rss = get_mm_rss(mm); - if (mm_rss >= mm->context.tsb_rss_limit) - tsb_grow(mm, mm_rss, GFP_ATOMIC); - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { struct tsb *tsb; unsigned long tag; diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 787533f0104..2cc8e6528c6 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -310,7 +310,6 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { - unsigned long initial_rss; mm->context.sparc64_ctx_val = 0UL; @@ -319,15 +318,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) * will be confused and think there is an older TSB to free up. */ mm->context.tsb = NULL; - - /* If this is fork, inherit the parent's TSB size. We would - * grow it to that size on the first page fault anyways. - */ - initial_rss = mm->context.tsb_nentries; - if (initial_rss) - initial_rss -= 1; - - tsb_grow(mm, initial_rss, GFP_KERNEL); + tsb_grow(mm, 0, GFP_KERNEL); if (unlikely(!mm->context.tsb)) return -ENOMEM; -- cgit v1.2.3 From 92704a1c63c3b481870d02636d0b5a70c7e21cd1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:27:19 -0800 Subject: [SPARC64]: Refine code sequences to get the cpu id. On uniprocessor, it's always zero, so optimize for that. On SMP, the jmpl to the stub kills the return address stack in the cpu branch prediction logic, so expand the code sequence inline and use a code patching section to fix things up. This also allows better and explicit register selection, which will be taken advantage of in a future changeset. The hard_smp_processor_id() function is big, so do not inline it. Fix up tests for Jalapeno to also test for Serrano chips. These tests want "jbus Ultra-IIIi" cases to match, so that is what we should test for. Signed-off-by: David S. Miller --- arch/sparc64/kernel/entry.S | 84 +++------------------------------------ arch/sparc64/kernel/irq.c | 4 +- arch/sparc64/kernel/setup.c | 56 +++++++++++++++++++++++++- arch/sparc64/kernel/smp.c | 9 +++-- arch/sparc64/kernel/traps.c | 4 +- arch/sparc64/kernel/vmlinux.lds.S | 3 ++ 6 files changed, 73 insertions(+), 87 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 563fa4ec33f..b3511ff5d04 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1628,84 +1628,10 @@ __flushw_user: 2: retl nop - /* Read cpu ID from hardware, return in %g6. - * (callers_pc - 4) is in %g1. Patched at boot time. - * - * Default is spitfire implementation. - * - * The instruction sequence needs to be 5 instructions - * in order to fit the longest implementation, which is - * currently starfire.
- */ - .align 32 - .globl __get_cpu_id -__get_cpu_id: - ldxa [%g0] ASI_UPA_CONFIG, %g6 - srlx %g6, 17, %g6 - jmpl %g1 + 0x4, %g0 - and %g6, 0x1f, %g6 - nop - -__get_cpu_id_cheetah_safari: - ldxa [%g0] ASI_SAFARI_CONFIG, %g6 - srlx %g6, 17, %g6 - jmpl %g1 + 0x4, %g0 - and %g6, 0x3ff, %g6 - nop - -__get_cpu_id_cheetah_jbus: - ldxa [%g0] ASI_JBUS_CONFIG, %g6 - srlx %g6, 17, %g6 - jmpl %g1 + 0x4, %g0 - and %g6, 0x1f, %g6 - nop - -__get_cpu_id_starfire: - sethi %hi(0x1fff40000d0 >> 9), %g6 - sllx %g6, 9, %g6 - or %g6, 0xd0, %g6 - jmpl %g1 + 0x4, %g0 - lduwa [%g6] ASI_PHYS_BYPASS_EC_E, %g6 - - .globl per_cpu_patch -per_cpu_patch: - sethi %hi(this_is_starfire), %o0 - lduw [%o0 + %lo(this_is_starfire)], %o1 - sethi %hi(__get_cpu_id_starfire), %o0 - brnz,pn %o1, 10f - or %o0, %lo(__get_cpu_id_starfire), %o0 - sethi %hi(tlb_type), %o0 - lduw [%o0 + %lo(tlb_type)], %o1 - brz,pt %o1, 11f - nop - rdpr %ver, %o0 - srlx %o0, 32, %o0 - sethi %hi(0x003e0016), %o1 - or %o1, %lo(0x003e0016), %o1 - cmp %o0, %o1 - sethi %hi(__get_cpu_id_cheetah_jbus), %o0 - be,pn %icc, 10f - or %o0, %lo(__get_cpu_id_cheetah_jbus), %o0 - sethi %hi(__get_cpu_id_cheetah_safari), %o0 - or %o0, %lo(__get_cpu_id_cheetah_safari), %o0 -10: - sethi %hi(__get_cpu_id), %o1 - or %o1, %lo(__get_cpu_id), %o1 - lduw [%o0 + 0x00], %o2 - stw %o2, [%o1 + 0x00] - flush %o1 + 0x00 - lduw [%o0 + 0x04], %o2 - stw %o2, [%o1 + 0x04] - flush %o1 + 0x04 - lduw [%o0 + 0x08], %o2 - stw %o2, [%o1 + 0x08] - flush %o1 + 0x08 - lduw [%o0 + 0x0c], %o2 - stw %o2, [%o1 + 0x0c] - flush %o1 + 0x0c - lduw [%o0 + 0x10], %o2 - stw %o2, [%o1 + 0x10] - flush %o1 + 0x10 -11: +#ifdef CONFIG_SMP + .globl hard_smp_processor_id +hard_smp_processor_id: + __GET_CPUID(%o0) retl nop +#endif diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 3e48af2769d..d069a6feb53 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -39,6 +39,7 @@ #include #include #include +#include #ifdef CONFIG_SMP static void distribute_irqs(void); @@ -153,7 +154,8 @@ void enable_irq(unsigned int irq) unsigned long ver; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - if ((ver >> 32) == 0x003e0016) { + if ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID) { /* We set it to our JBUS ID. 
*/ __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (tid) diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 59a70301a6c..f751d11926b 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -490,6 +490,58 @@ void register_prom_callbacks(void) "' linux-.soft2 to .soft2"); } +static void __init per_cpu_patch(void) +{ +#ifdef CONFIG_SMP + struct cpuid_patch_entry *p; + unsigned long ver; + int is_jbus; + + if (tlb_type == spitfire && !this_is_starfire) + return; + + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + is_jbus = ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID); + + p = &__cpuid_patch; + while (p < &__cpuid_patch_end) { + unsigned long addr = p->addr; + unsigned int *insns; + + switch (tlb_type) { + case spitfire: + insns = &p->starfire[0]; + break; + case cheetah: + case cheetah_plus: + if (is_jbus) + insns = &p->cheetah_jbus[0]; + else + insns = &p->cheetah_safari[0]; + break; + default: + prom_printf("Unknown cpu type, halting.\n"); + prom_halt(); + }; + + *(unsigned int *) (addr + 0) = insns[0]; + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = insns[1]; + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + *(unsigned int *) (addr + 8) = insns[2]; + __asm__ __volatile__("flush %0" : : "r" (addr + 8)); + + *(unsigned int *) (addr + 12) = insns[3]; + __asm__ __volatile__("flush %0" : : "r" (addr + 12)); + + p++; + } +#endif +} + void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ @@ -507,8 +559,8 @@ void __init setup_arch(char **cmdline_p) /* Work out if we are starfire early on */ check_if_starfire(); - /* Now we know enough to patch the __get_cpu_id() - * trampoline used by trap code. + /* Now we know enough to patch the get_cpuid sequences + * used by trap code. */ per_cpu_patch(); diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 0e7552546d3..16b8eca9754 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -424,7 +424,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { u64 pstate, ver; - int nack_busy_id, is_jalapeno; + int nack_busy_id, is_jbus; if (cpus_empty(mask)) return; @@ -434,7 +434,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas * derivative processor. 
*/ __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - is_jalapeno = ((ver >> 32) == 0x003e0016); + is_jbus = ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID); __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); @@ -459,7 +460,7 @@ retry: for_each_cpu_mask(i, mask) { u64 target = (i << 14) | 0x70; - if (!is_jalapeno) + if (!is_jbus) target |= (nack_busy_id << 24); __asm__ __volatile__( "stxa %%g0, [%0] %1\n\t" @@ -512,7 +513,7 @@ retry: for_each_cpu_mask(i, mask) { u64 check_mask; - if (is_jalapeno) + if (is_jbus) check_mask = (0x2UL << (2*i)); else check_mask = (0x2UL << diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 7e52e897266..1c4744c047a 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -38,6 +38,7 @@ #include #include #include +#include #ifdef CONFIG_KMOD #include #endif @@ -788,7 +789,8 @@ void __init cheetah_ecache_flush_init(void) cheetah_error_log[i].afsr = CHAFSR_INVALID; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - if ((ver >> 32) == 0x003e0016) { + if ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID) { cheetah_error_table = &__jalapeno_error_table[0]; cheetah_afsr_errors = JPAFSR_ERRORS; } else if ((ver >> 32) == 0x003e0015) { diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 71b943f1c9b..1639d9c935c 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -74,6 +74,9 @@ SECTIONS __tsb_phys_patch = .; .tsb_phys_patch : { *(.tsb_phys_patch) } __tsb_phys_patch_end = .; + __cpuid_patch = .; + .cpuid_patch : { *(.cpuid_patch) } + __cpuid_patch_end = .; . = ALIGN(8192); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } -- cgit v1.2.3 From ffe483d55229fadbaf4cc7316d47024a24ecd1a2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 21:55:10 -0800 Subject: [SPARC64]: Add explicit register args to trap state loading macros. This, as well as making the code cleaner, allows a simplification in the TSB miss handling path. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/entry.S | 8 ++++---- arch/sparc64/kernel/etrap.S | 10 +++++----- arch/sparc64/kernel/rtrap.S | 2 +- arch/sparc64/kernel/tsb.S | 9 +-------- arch/sparc64/kernel/winfixup.S | 18 +++++++++--------- 5 files changed, 20 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index b3511ff5d04..4ca3ea0beaf 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -50,7 +50,7 @@ do_fpdis: add %g0, %g0, %g0 ba,a,pt %xcc, rtrap_clr_l6 -1: TRAP_LOAD_THREAD_REG +1: TRAP_LOAD_THREAD_REG(%g6, %g1) ldub [%g6 + TI_FPSAVED], %g5 wr %g0, FPRS_FEF, %fprs andcc %g5, FPRS_FEF, %g0 @@ -190,7 +190,7 @@ fp_other_bounce: .globl do_fpother_check_fitos .align 32 do_fpother_check_fitos: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) sethi %hi(fp_other_bounce - 4), %g7 or %g7, %lo(fp_other_bounce - 4), %g7 @@ -378,7 +378,7 @@ do_ivec: sllx %g2, %g4, %g2 sllx %g4, 2, %g4 - TRAP_LOAD_IRQ_WORK + TRAP_LOAD_IRQ_WORK(%g6, %g1) lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ @@ -422,7 +422,7 @@ setcc: .globl utrap_trap utrap_trap: /* %g3=handler,%g4=level */ - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_UTRAPS], %g1 brnz,pt %g1, invoke_utrap nop diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index d974d18b15b..b5f6bc52d91 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -31,7 +31,7 @@ .globl etrap, etrap_irq, etraptl1 etrap: rdpr %pil, %g2 etrap_irq: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 sllx %g2, 20, %g3 andcc %g1, TSTATE_PRIV, %g0 @@ -100,7 +100,7 @@ etrap_irq: stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] wrpr %g0, ETRAP_PSTATE2, %pstate mov %l6, %g6 - LOAD_PER_CPU_BASE(%g4, %g3, %l1) + LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) jmpl %l2 + 0x4, %g0 ldx [%g6 + TI_TASK], %g4 @@ -124,7 +124,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. * 0x58 TL4's TT * 0x60 TL */ - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) sub %sp, ((4 * 8) * 4) + 8, %g2 rdpr %tl, %g1 @@ -179,7 +179,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. .align 64 .globl scetrap scetrap: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %pil, %g2 rdpr %tstate, %g1 sllx %g2, 20, %g3 @@ -250,7 +250,7 @@ scetrap: stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] mov %l6, %g6 stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] - LOAD_PER_CPU_BASE(%g4, %g3, %l1) + LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) ldx [%g6 + TI_TASK], %g4 done diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 64bc03610bc..61bd45e7697 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -226,7 +226,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 brz,pt %l3, 1f nop /* Must do this before thread reg is clobbered below. */ - LOAD_PER_CPU_BASE(%i0, %i1, %i2) + LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2) 1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index ff6a79beb98..28e38b168dd 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -36,14 +36,7 @@ tsb_miss_itlb: nop tsb_miss_page_table_walk: - /* This clobbers %g1 and %g6, preserve them... 
*/ - mov %g1, %g5 - mov %g6, %g2 - - TRAP_LOAD_PGD_PHYS - - mov %g2, %g6 - mov %g5, %g1 + TRAP_LOAD_PGD_PHYS(%g7, %g5) USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 320a762d051..211021ae6e8 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -40,7 +40,7 @@ set_pcontext: */ .globl fill_fixup, spill_fixup fill_fixup: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 andcc %g1, TSTATE_PRIV, %g0 or %g4, FAULT_CODE_WINFIXUP, %g4 @@ -86,7 +86,7 @@ fill_fixup: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g1, %g2, %g3) + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) /* This is the same as below, except we handle this a bit special * since we must preserve %l5 and %l6, see comment above. @@ -105,7 +105,7 @@ fill_fixup: * do not touch %g7 or %g2 so we handle the two cases fine. */ spill_fixup: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_FLAGS], %g1 andcc %g1, _TIF_32BIT, %g0 ldub [%g6 + TI_WSAVED], %g1 @@ -181,7 +181,7 @@ winfix_mna: wrpr %g3, %tnpc done fill_fixup_mna: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 andcc %g1, TSTATE_PRIV, %g0 be,pt %xcc, window_mna_from_user_common @@ -209,14 +209,14 @@ fill_fixup_mna: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g1, %g2, %g3) + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) call mem_address_unaligned add %sp, PTREGS_OFF, %o0 b,pt %xcc, rtrap nop ! yes, the nop is correct spill_fixup_mna: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_FLAGS], %g1 andcc %g1, _TIF_32BIT, %g0 ldub [%g6 + TI_WSAVED], %g1 @@ -284,7 +284,7 @@ winfix_dax: wrpr %g3, %tnpc done fill_fixup_dax: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 andcc %g1, TSTATE_PRIV, %g0 be,pt %xcc, window_dax_from_user_common @@ -312,14 +312,14 @@ fill_fixup_dax: wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate mov %o7, %g6 ! Get current back. ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g1, %g2, %g3) + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) call spitfire_data_access_exception add %sp, PTREGS_OFF, %o0 b,pt %xcc, rtrap nop ! yes, the nop is correct spill_fixup_dax: - TRAP_LOAD_THREAD_REG + TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_FLAGS], %g1 andcc %g1, _TIF_32BIT, %g0 ldub [%g6 + TI_WSAVED], %g1 -- cgit v1.2.3 From 314ef6859750b6539eac48d78059bb7986f29cb1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 00:10:01 -0800 Subject: [SPARC64]: Refine register window trap handling. When saving and restoring trap state, do the window spill/fill handling inline so that we never trap deeper than 2 trap levels. This is important for chips like Niagara. The window fixup code is massively simplified, and many more improvements are now possible. Signed-off-by: David S.
Miller --- arch/sparc64/kernel/etrap.S | 104 +++------- arch/sparc64/kernel/process.c | 20 +- arch/sparc64/kernel/rtrap.S | 58 +++++- arch/sparc64/kernel/tsb.S | 1 - arch/sparc64/kernel/ttable.S | 16 +- arch/sparc64/kernel/winfixup.S | 454 ++++++++++------------------------------- 6 files changed, 214 insertions(+), 439 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index b5f6bc52d91..4a0e01b1404 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -55,7 +55,31 @@ etrap_irq: rd %y, %g3 stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y] - save %g2, -STACK_BIAS, %sp ! Ordering here is critical + + rdpr %cansave, %g1 + brnz,pt %g1, etrap_save + nop + + rdpr %cwp, %g1 + add %g1, 2, %g1 + wrpr %g1, %cwp + be,pt %xcc, etrap_user_spill + mov ASI_AIUP, %g3 + + rdpr %otherwin, %g3 + brz %g3, etrap_kernel_spill + mov ASI_AIUS, %g3 + +etrap_user_spill: + + wr %g3, 0x0, %asi + ldx [%g6 + TI_FLAGS], %g3 + and %g3, _TIF_32BIT, %g3 + brnz,pt %g3, etrap_user_spill_32bit + nop + ba,a,pt %xcc, etrap_user_spill_64bit + +etrap_save: save %g2, -STACK_BIAS, %sp mov %g6, %l6 bne,pn %xcc, 3f @@ -176,83 +200,5 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. ba,pt %xcc, 1b andcc %g1, TSTATE_PRIV, %g0 - .align 64 - .globl scetrap -scetrap: - TRAP_LOAD_THREAD_REG(%g6, %g1) - rdpr %pil, %g2 - rdpr %tstate, %g1 - sllx %g2, 20, %g3 - andcc %g1, TSTATE_PRIV, %g0 - or %g1, %g3, %g1 - bne,pn %xcc, 1f - sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2 - wrpr %g0, 7, %cleanwin - - sllx %g1, 51, %g3 - sethi %hi(TASK_REGOFF), %g2 - or %g2, %lo(TASK_REGOFF), %g2 - brlz,pn %g3, 1f - add %g6, %g2, %g2 - wr %g0, 0, %fprs -1: rdpr %tpc, %g3 - stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE] - - rdpr %tnpc, %g1 - stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC] - stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC] - save %g2, -STACK_BIAS, %sp ! 
Ordering here is critical - mov %g6, %l6 - bne,pn %xcc, 2f - mov ASI_P, %l7 - rdpr %canrestore, %g3 - - rdpr %wstate, %g2 - wrpr %g0, 0, %canrestore - sll %g2, 3, %g2 - mov PRIMARY_CONTEXT, %l4 - wrpr %g3, 0, %otherwin - wrpr %g2, 0, %wstate - sethi %hi(sparc64_kern_pri_context), %g2 - ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 - stxa %g3, [%l4] ASI_DMMU - sethi %hi(KERNBASE), %l4 - flush %l4 - - mov ASI_AIUS, %l7 -2: mov %g4, %l4 - mov %g5, %l5 - add %g7, 0x4, %l2 - wrpr %g0, ETRAP_PSTATE1, %pstate - stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] - stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] - sllx %l7, 24, %l7 - - stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] - rdpr %cwp, %l0 - stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] - stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] - stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] - stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] - or %l7, %l0, %l7 - sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 - - or %l7, %l0, %l7 - wrpr %l2, %tnpc - wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate - stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] - stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] - stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] - stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] - stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] - - stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] - stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] - mov %l6, %g6 - stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] - LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) - ldx [%g6 + TI_TASK], %g4 - done - #undef TASK_REGOFF #undef ETRAP_PSTATE1 diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 26548fc604b..803eea4dc4f 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -541,6 +541,18 @@ void synchronize_user_stack(void) } } +static void stack_unaligned(unsigned long sp) +{ + siginfo_t info; + + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *) sp; + info.si_trapno = 0; + force_sig_info(SIGBUS, &info, current); +} + void fault_in_user_windows(void) { struct thread_info *t = current_thread_info(); @@ -556,13 +568,17 @@ void fault_in_user_windows(void) flush_user_windows(); window = get_thread_wsaved(); - if (window != 0) { + if (likely(window != 0)) { window -= 1; do { unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; - if (copy_to_user((char __user *)sp, rwin, winsize)) + if (unlikely(sp & 0x7UL)) + stack_unaligned(sp); + + if (unlikely(copy_to_user((char __user *)sp, + rwin, winsize))) goto barf; } while (window--); } diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 61bd45e7697..ecfbbdc5612 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -267,15 +267,69 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 wrpr %l2, %g0, %canrestore wrpr %l1, %g0, %wstate - wrpr %g0, %g0, %otherwin + brnz,pt %l2, user_rtt_restore + wrpr %g0, %g0, %otherwin + + ldx [%g6 + TI_FLAGS], %g3 + wr %g0, ASI_AIUP, %asi + rdpr %cwp, %g1 + andcc %g3, _TIF_32BIT, %g0 + sub %g1, 1, %g1 + bne,pt %xcc, user_rtt_fill_32bit + wrpr %g1, %cwp + ba,a,pt %xcc, user_rtt_fill_64bit + +user_rtt_fill_fixup: + rdpr %cwp, %g1 + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + + rdpr %wstate, %g2 + sll %g2, 3, %g2 + wrpr %g2, 0x0, %wstate + + /* We know %canrestore and %otherwin are both zero. 
*/ + + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + stxa %g2, [%g1] ASI_DMMU + sethi %hi(KERNBASE), %g1 + flush %g1 + + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] + + mov %g6, %l1 + wrpr %g0, 0x0, %tl + wrpr %g0, RTRAP_PSTATE, %pstate + mov %l1, %g6 + ldx [%g6 + TI_TASK], %g4 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +user_rtt_pre_restore: + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + +user_rtt_restore: restore rdpr %canrestore, %g1 wrpr %g1, 0x0, %cleanwin retry nop -kern_rtt: restore +kern_rtt: rdpr %canrestore, %g1 + brz,pn %g1, kern_rtt_fill + nop +kern_rtt_restore: + restore retry + to_kernel: #ifdef CONFIG_PREEMPT ldsw [%g6 + TI_PRE_COUNT], %l5 diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 28e38b168dd..3b45db98005 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -115,7 +115,6 @@ sparc64_realfault_common: ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state nop ! Delay slot (fill me) - .globl winfix_trampoline winfix_trampoline: rdpr %tpc, %g3 ! Prepare winfixup TNPC or %g3, 0x7c, %g3 ! Compute branch offset diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 99531424c59..2679b6e253a 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -92,11 +92,11 @@ tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f) tl0_s0n: SPILL_0_NORMAL tl0_s1n: SPILL_1_NORMAL tl0_s2n: SPILL_2_NORMAL -tl0_s3n: SPILL_3_NORMAL -tl0_s4n: SPILL_4_NORMAL -tl0_s5n: SPILL_5_NORMAL -tl0_s6n: SPILL_6_NORMAL -tl0_s7n: SPILL_7_NORMAL +tl0_s3n: SPILL_0_NORMAL_ETRAP +tl0_s4n: SPILL_1_GENERIC_ETRAP +tl0_s5n: SPILL_1_GENERIC_ETRAP_FIXUP +tl0_s6n: SPILL_2_GENERIC_ETRAP +tl0_s7n: SPILL_2_GENERIC_ETRAP_FIXUP tl0_s0o: SPILL_0_OTHER tl0_s1o: SPILL_1_OTHER tl0_s2o: SPILL_2_OTHER @@ -110,9 +110,9 @@ tl0_f1n: FILL_1_NORMAL tl0_f2n: FILL_2_NORMAL tl0_f3n: FILL_3_NORMAL tl0_f4n: FILL_4_NORMAL -tl0_f5n: FILL_5_NORMAL -tl0_f6n: FILL_6_NORMAL -tl0_f7n: FILL_7_NORMAL +tl0_f5n: FILL_0_NORMAL_RTRAP +tl0_f6n: FILL_1_GENERIC_RTRAP +tl0_f7n: FILL_2_GENERIC_RTRAP tl0_f0o: FILL_0_OTHER tl0_f1o: FILL_1_OTHER tl0_f2o: FILL_2_OTHER diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 211021ae6e8..efe2770e8f5 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -1,8 +1,6 @@ -/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $ +/* winfixup.S: Handle cases where user stack pointer is found to be bogus. * - * winfixup.S: Handle cases where user stack pointer is found to be bogus. - * - * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997, 2006 David S. Miller (davem@davemloft.net) */ #include @@ -15,367 +13,129 @@ .text -set_pcontext: - sethi %hi(sparc64_kern_pri_context), %l1 - ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 - mov PRIMARY_CONTEXT, %g1 - stxa %l1, [%g1] ASI_DMMU - sethi %hi(KERNBASE), %l1 - flush %l1 - retl - nop + /* It used to be the case that these register window fault + * handlers could run via the save and restore instructions + * done by the trap entry and exit code. They now do the + * window spill/fill by hand, so that case no longer can occur. + */ .align 32 - - /* Here are the rules, pay attention. 
- * - * The kernel is disallowed from touching user space while - * the trap level is greater than zero, except for from within - * the window spill/fill handlers. This must be followed - * so that we can easily detect the case where we tried to - * spill/fill with a bogus (or unmapped) user stack pointer. - * - * These are layed out in a special way for cache reasons, - * don't touch... - */ - .globl fill_fixup, spill_fixup fill_fixup: TRAP_LOAD_THREAD_REG(%g6, %g1) - rdpr %tstate, %g1 - andcc %g1, TSTATE_PRIV, %g0 - or %g4, FAULT_CODE_WINFIXUP, %g4 - be,pt %xcc, window_scheisse_from_user_common - and %g1, TSTATE_CWP, %g1 - - /* This is the extremely complex case, but it does happen from - * time to time if things are just right. Essentially the restore - * done in rtrap right before going back to user mode, with tl=1 - * and that levels trap stack registers all setup, took a fill trap, - * the user stack was not mapped in the tlb, and tlb miss occurred, - * the pte found was not valid, and a simple ref bit watch update - * could not satisfy the miss, so we got here. - * - * We must carefully unwind the state so we get back to tl=0, preserve - * all the register values we were going to give to the user. Luckily - * most things are where they need to be, we also have the address - * which triggered the fault handy as well. - * - * Also note that we must preserve %l5 and %l6. If the user was - * returning from a system call, we must make it look this way - * after we process the fill fault on the users stack. - * - * First, get into the window where the original restore was executed. - */ - - rdpr %wstate, %g2 ! Grab user mode wstate. - wrpr %g1, %cwp ! Get into the right window. - sll %g2, 3, %g2 ! NORMAL-->OTHER - - wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. - wrpr %g2, 0x0, %wstate ! This must be consistent. - wrpr %g0, 0x0, %otherwin ! We know this. - call set_pcontext ! Change contexts... + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap_clr_l6 nop - rdpr %pstate, %l1 ! Prepare to change globals. - mov %g6, %o7 ! Get current. - - andn %l1, PSTATE_MM, %l1 ! We want to be in RMO - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] - wrpr %g0, 0x0, %tl ! Out of trap levels. - wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate - mov %o7, %g6 - ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - /* This is the same as below, except we handle this a bit special - * since we must preserve %l5 and %l6, see comment above. - */ - call do_sparc64_fault - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop ! yes, nop is correct - - /* Be very careful about usage of the alternate globals here. - * You cannot touch %g4/%g5 as that has the fault information - * should this be from usermode. Also be careful for the case - * where we get here from the save instruction in etrap.S when - * coming from either user or kernel (does not matter which, it - * is the same problem in both cases). Essentially this means - * do not touch %g7 or %g2 so we handle the two cases fine. + /* Be very careful about usage of the trap globals here. + * You cannot touch %g5 as that has the fault information. 
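+ * %g6 is only safe to use once TRAP_LOAD_THREAD_REG below
+ * has loaded the thread pointer into it.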
*/ spill_fixup: +spill_fixup_mna: +spill_fixup_dax: TRAP_LOAD_THREAD_REG(%g6, %g1) - ldx [%g6 + TI_FLAGS], %g1 - andcc %g1, _TIF_32BIT, %g0 - ldub [%g6 + TI_WSAVED], %g1 - - sll %g1, 3, %g3 - add %g6, %g3, %g3 - stx %sp, [%g3 + TI_RWIN_SPTRS] - sll %g1, 7, %g3 - bne,pt %xcc, 1f - add %g6, %g3, %g3 - stx %l0, [%g3 + TI_REG_WINDOW + 0x00] - stx %l1, [%g3 + TI_REG_WINDOW + 0x08] - - stx %l2, [%g3 + TI_REG_WINDOW + 0x10] - stx %l3, [%g3 + TI_REG_WINDOW + 0x18] - stx %l4, [%g3 + TI_REG_WINDOW + 0x20] - stx %l5, [%g3 + TI_REG_WINDOW + 0x28] - stx %l6, [%g3 + TI_REG_WINDOW + 0x30] - stx %l7, [%g3 + TI_REG_WINDOW + 0x38] - stx %i0, [%g3 + TI_REG_WINDOW + 0x40] - stx %i1, [%g3 + TI_REG_WINDOW + 0x48] - - stx %i2, [%g3 + TI_REG_WINDOW + 0x50] - stx %i3, [%g3 + TI_REG_WINDOW + 0x58] - stx %i4, [%g3 + TI_REG_WINDOW + 0x60] - stx %i5, [%g3 + TI_REG_WINDOW + 0x68] - stx %i6, [%g3 + TI_REG_WINDOW + 0x70] - b,pt %xcc, 2f - stx %i7, [%g3 + TI_REG_WINDOW + 0x78] -1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] - - stw %l1, [%g3 + TI_REG_WINDOW + 0x04] - stw %l2, [%g3 + TI_REG_WINDOW + 0x08] - stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] - stw %l4, [%g3 + TI_REG_WINDOW + 0x10] - stw %l5, [%g3 + TI_REG_WINDOW + 0x14] - stw %l6, [%g3 + TI_REG_WINDOW + 0x18] - stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] - stw %i0, [%g3 + TI_REG_WINDOW + 0x20] - - stw %i1, [%g3 + TI_REG_WINDOW + 0x24] - stw %i2, [%g3 + TI_REG_WINDOW + 0x28] - stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] - stw %i4, [%g3 + TI_REG_WINDOW + 0x30] - stw %i5, [%g3 + TI_REG_WINDOW + 0x34] - stw %i6, [%g3 + TI_REG_WINDOW + 0x38] - stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] -2: add %g1, 1, %g1 - - stb %g1, [%g6 + TI_WSAVED] - rdpr %tstate, %g1 - andcc %g1, TSTATE_PRIV, %g0 + ldx [%g6 + TI_FLAGS], %g1 + andcc %g1, _TIF_32BIT, %g0 + ldub [%g6 + TI_WSAVED], %g1 + sll %g1, 3, %g3 + add %g6, %g3, %g3 + stx %sp, [%g3 + TI_RWIN_SPTRS] + sll %g1, 7, %g3 + bne,pt %xcc, 1f + add %g6, %g3, %g3 + stx %l0, [%g3 + TI_REG_WINDOW + 0x00] + stx %l1, [%g3 + TI_REG_WINDOW + 0x08] + stx %l2, [%g3 + TI_REG_WINDOW + 0x10] + stx %l3, [%g3 + TI_REG_WINDOW + 0x18] + stx %l4, [%g3 + TI_REG_WINDOW + 0x20] + stx %l5, [%g3 + TI_REG_WINDOW + 0x28] + stx %l6, [%g3 + TI_REG_WINDOW + 0x30] + stx %l7, [%g3 + TI_REG_WINDOW + 0x38] + stx %i0, [%g3 + TI_REG_WINDOW + 0x40] + stx %i1, [%g3 + TI_REG_WINDOW + 0x48] + stx %i2, [%g3 + TI_REG_WINDOW + 0x50] + stx %i3, [%g3 + TI_REG_WINDOW + 0x58] + stx %i4, [%g3 + TI_REG_WINDOW + 0x60] + stx %i5, [%g3 + TI_REG_WINDOW + 0x68] + stx %i6, [%g3 + TI_REG_WINDOW + 0x70] + ba,pt %xcc, 2f + stx %i7, [%g3 + TI_REG_WINDOW + 0x78] +1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00] + stw %l1, [%g3 + TI_REG_WINDOW + 0x04] + stw %l2, [%g3 + TI_REG_WINDOW + 0x08] + stw %l3, [%g3 + TI_REG_WINDOW + 0x0c] + stw %l4, [%g3 + TI_REG_WINDOW + 0x10] + stw %l5, [%g3 + TI_REG_WINDOW + 0x14] + stw %l6, [%g3 + TI_REG_WINDOW + 0x18] + stw %l7, [%g3 + TI_REG_WINDOW + 0x1c] + stw %i0, [%g3 + TI_REG_WINDOW + 0x20] + stw %i1, [%g3 + TI_REG_WINDOW + 0x24] + stw %i2, [%g3 + TI_REG_WINDOW + 0x28] + stw %i3, [%g3 + TI_REG_WINDOW + 0x2c] + stw %i4, [%g3 + TI_REG_WINDOW + 0x30] + stw %i5, [%g3 + TI_REG_WINDOW + 0x34] + stw %i6, [%g3 + TI_REG_WINDOW + 0x38] + stw %i7, [%g3 + TI_REG_WINDOW + 0x3c] +2: add %g1, 1, %g1 + stb %g1, [%g6 + TI_WSAVED] + rdpr %tstate, %g1 + andcc %g1, TSTATE_PRIV, %g0 saved - and %g1, TSTATE_CWP, %g1 - be,a,pn %xcc, window_scheisse_from_user_common - mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 + be,pn %xcc, 1f + and %g1, TSTATE_CWP, %g1 retry +1: mov FAULT_CODE_WRITE | 
FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 -window_scheisse_from_user_common: - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] - wrpr %g1, %cwp - ba,pt %xcc, etrap - rd %pc, %g7 - call do_sparc64_fault - add %sp, PTREGS_OFF, %o0 - ba,a,pt %xcc, rtrap_clr_l6 - - .globl winfix_mna, fill_fixup_mna, spill_fixup_mna winfix_mna: - andn %g3, 0x7f, %g3 - add %g3, 0x78, %g3 - wrpr %g3, %tnpc + andn %g3, 0x7f, %g3 + add %g3, 0x78, %g3 + wrpr %g3, %tnpc done -fill_fixup_mna: - TRAP_LOAD_THREAD_REG(%g6, %g1) - rdpr %tstate, %g1 - andcc %g1, TSTATE_PRIV, %g0 - be,pt %xcc, window_mna_from_user_common - and %g1, TSTATE_CWP, %g1 - - /* Please, see fill_fixup commentary about why we must preserve - * %l5 and %l6 to preserve absolute correct semantics. - */ - rdpr %wstate, %g2 ! Grab user mode wstate. - wrpr %g1, %cwp ! Get into the right window. - sll %g2, 3, %g2 ! NORMAL-->OTHER - wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. - - wrpr %g2, 0x0, %wstate ! This must be consistent. - wrpr %g0, 0x0, %otherwin ! We know this. - call set_pcontext ! Change contexts... - nop - rdpr %pstate, %l1 ! Prepare to change globals. - mov %g4, %o2 ! Setup args for - mov %g5, %o1 ! final call to mem_address_unaligned. - andn %l1, PSTATE_MM, %l1 ! We want to be in RMO - - mov %g6, %o7 ! Stash away current. - wrpr %g0, 0x0, %tl ! Out of trap levels. - wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate - mov %o7, %g6 ! Get current back. - ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - call mem_address_unaligned - add %sp, PTREGS_OFF, %o0 - b,pt %xcc, rtrap - nop ! 
yes, the nop is correct -spill_fixup_mna: +fill_fixup_mna: TRAP_LOAD_THREAD_REG(%g6, %g1) - ldx [%g6 + TI_FLAGS], %g1 - andcc %g1, _TIF_32BIT, %g0 - ldub [%g6 + TI_WSAVED], %g1 - sll %g1, 3, %g3 - add %g6, %g3, %g3 - stx %sp, [%g3 + TI_RWIN_SPTRS] - - sll %g1, 7, %g3 - bne,pt %xcc, 1f - add %g6, %g3, %g3 - stx %l0, [%g3 + TI_REG_WINDOW + 0x00] - stx %l1, [%g3 + TI_REG_WINDOW + 0x08] - stx %l2, [%g3 + TI_REG_WINDOW + 0x10] - stx %l3, [%g3 + TI_REG_WINDOW + 0x18] - stx %l4, [%g3 + TI_REG_WINDOW + 0x20] - - stx %l5, [%g3 + TI_REG_WINDOW + 0x28] - stx %l6, [%g3 + TI_REG_WINDOW + 0x30] - stx %l7, [%g3 + TI_REG_WINDOW + 0x38] - stx %i0, [%g3 + TI_REG_WINDOW + 0x40] - stx %i1, [%g3 + TI_REG_WINDOW + 0x48] - stx %i2, [%g3 + TI_REG_WINDOW + 0x50] - stx %i3, [%g3 + TI_REG_WINDOW + 0x58] - stx %i4, [%g3 + TI_REG_WINDOW + 0x60] - - stx %i5, [%g3 + TI_REG_WINDOW + 0x68] - stx %i6, [%g3 + TI_REG_WINDOW + 0x70] - stx %i7, [%g3 + TI_REG_WINDOW + 0x78] - b,pt %xcc, 2f - add %g1, 1, %g1 -1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] - std %l2, [%g3 + TI_REG_WINDOW + 0x08] - std %l4, [%g3 + TI_REG_WINDOW + 0x10] + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o2 + mov %l5, %o1 + call mem_address_unaligned + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 - std %l6, [%g3 + TI_REG_WINDOW + 0x18] - std %i0, [%g3 + TI_REG_WINDOW + 0x20] - std %i2, [%g3 + TI_REG_WINDOW + 0x28] - std %i4, [%g3 + TI_REG_WINDOW + 0x30] - std %i6, [%g3 + TI_REG_WINDOW + 0x38] - add %g1, 1, %g1 -2: stb %g1, [%g6 + TI_WSAVED] - rdpr %tstate, %g1 - - andcc %g1, TSTATE_PRIV, %g0 - saved - be,pn %xcc, window_mna_from_user_common - and %g1, TSTATE_CWP, %g1 - retry -window_mna_from_user_common: - wrpr %g1, %cwp - sethi %hi(109f), %g7 - ba,pt %xcc, etrap -109: or %g7, %lo(109b), %g7 - mov %l4, %o2 - mov %l5, %o1 - call mem_address_unaligned - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - clr %l6 - - .globl winfix_dax, fill_fixup_dax, spill_fixup_dax winfix_dax: - andn %g3, 0x7f, %g3 - add %g3, 0x74, %g3 - wrpr %g3, %tnpc + andn %g3, 0x7f, %g3 + add %g3, 0x74, %g3 + wrpr %g3, %tnpc done -fill_fixup_dax: - TRAP_LOAD_THREAD_REG(%g6, %g1) - rdpr %tstate, %g1 - andcc %g1, TSTATE_PRIV, %g0 - be,pt %xcc, window_dax_from_user_common - and %g1, TSTATE_CWP, %g1 - - /* Please, see fill_fixup commentary about why we must preserve - * %l5 and %l6 to preserve absolute correct semantics. - */ - rdpr %wstate, %g2 ! Grab user mode wstate. - wrpr %g1, %cwp ! Get into the right window. - sll %g2, 3, %g2 ! NORMAL-->OTHER - wrpr %g0, 0x0, %canrestore ! Standard etrap stuff. - - wrpr %g2, 0x0, %wstate ! This must be consistent. - wrpr %g0, 0x0, %otherwin ! We know this. - call set_pcontext ! Change contexts... - nop - rdpr %pstate, %l1 ! Prepare to change globals. - mov %g4, %o1 ! Setup args for - mov %g5, %o2 ! final call to spitfire_data_access_exception. - andn %l1, PSTATE_MM, %l1 ! We want to be in RMO - - mov %g6, %o7 ! Stash away current. - wrpr %g0, 0x0, %tl ! Out of trap levels. - wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate - mov %o7, %g6 ! Get current back. - ldx [%g6 + TI_TASK], %g4 ! Finish it. - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - call spitfire_data_access_exception - add %sp, PTREGS_OFF, %o0 - b,pt %xcc, rtrap - nop ! 
yes, the nop is correct -spill_fixup_dax: +fill_fixup_dax: TRAP_LOAD_THREAD_REG(%g6, %g1) - ldx [%g6 + TI_FLAGS], %g1 - andcc %g1, _TIF_32BIT, %g0 - ldub [%g6 + TI_WSAVED], %g1 - sll %g1, 3, %g3 - add %g6, %g3, %g3 - stx %sp, [%g3 + TI_RWIN_SPTRS] - - sll %g1, 7, %g3 - bne,pt %xcc, 1f - add %g6, %g3, %g3 - stx %l0, [%g3 + TI_REG_WINDOW + 0x00] - stx %l1, [%g3 + TI_REG_WINDOW + 0x08] - stx %l2, [%g3 + TI_REG_WINDOW + 0x10] - stx %l3, [%g3 + TI_REG_WINDOW + 0x18] - stx %l4, [%g3 + TI_REG_WINDOW + 0x20] - - stx %l5, [%g3 + TI_REG_WINDOW + 0x28] - stx %l6, [%g3 + TI_REG_WINDOW + 0x30] - stx %l7, [%g3 + TI_REG_WINDOW + 0x38] - stx %i0, [%g3 + TI_REG_WINDOW + 0x40] - stx %i1, [%g3 + TI_REG_WINDOW + 0x48] - stx %i2, [%g3 + TI_REG_WINDOW + 0x50] - stx %i3, [%g3 + TI_REG_WINDOW + 0x58] - stx %i4, [%g3 + TI_REG_WINDOW + 0x60] - - stx %i5, [%g3 + TI_REG_WINDOW + 0x68] - stx %i6, [%g3 + TI_REG_WINDOW + 0x70] - stx %i7, [%g3 + TI_REG_WINDOW + 0x78] - b,pt %xcc, 2f - add %g1, 1, %g1 -1: std %l0, [%g3 + TI_REG_WINDOW + 0x00] - std %l2, [%g3 + TI_REG_WINDOW + 0x08] - std %l4, [%g3 + TI_REG_WINDOW + 0x10] - - std %l6, [%g3 + TI_REG_WINDOW + 0x18] - std %i0, [%g3 + TI_REG_WINDOW + 0x20] - std %i2, [%g3 + TI_REG_WINDOW + 0x28] - std %i4, [%g3 + TI_REG_WINDOW + 0x30] - std %i6, [%g3 + TI_REG_WINDOW + 0x38] - add %g1, 1, %g1 -2: stb %g1, [%g6 + TI_WSAVED] - rdpr %tstate, %g1 - - andcc %g1, TSTATE_PRIV, %g0 - saved - be,pn %xcc, window_dax_from_user_common - and %g1, TSTATE_CWP, %g1 - retry -window_dax_from_user_common: - wrpr %g1, %cwp - sethi %hi(109f), %g7 - ba,pt %xcc, etrap -109: or %g7, %lo(109b), %g7 - mov %l4, %o1 - mov %l5, %o2 - call spitfire_data_access_exception - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - clr %l6 + rdpr %tstate, %g1 + and %g1, TSTATE_CWP, %g1 + wrpr %g1, %cwp + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call spitfire_data_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 -- cgit v1.2.3 From 52bf082f0a6e49e08ed99d4d9518c662dc735c7a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:08:37 -0800 Subject: [SPARC64]: SUN4V hypervisor TLB flush support code. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 224 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 214 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 269ed57b3e9..cac58d66fca 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -15,6 +15,7 @@ #include #include #include +#include /* Basically, most of the Spitfire vs. 
Cheetah madness * has to do with the fact that Cheetah does not support @@ -29,7 +30,8 @@ .text .align 32 .globl __flush_tlb_mm -__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ +__flush_tlb_mm: /* 18 insns */ + /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 bne,pn %icc, __spitfire_flush_tlb_mm_slow @@ -52,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ .align 32 .globl __flush_tlb_pending -__flush_tlb_pending: +__flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -84,7 +86,8 @@ __flush_tlb_pending: .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* %o0=start, %o1=end */ +__flush_tlb_kernel_range: /* 14 insns */ + /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sethi %hi(PAGE_SIZE), %o4 @@ -100,6 +103,7 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ flush %o3 retl nop + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -252,7 +256,63 @@ __cheetah_flush_dcache_page: /* 11 insns */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ -cheetah_patch_one: + /* Hypervisor specific versions, patched at boot time. */ +__hypervisor_flush_tlb_mm: /* 8 insns */ + mov %o0, %o2 /* ARG2: mmu context */ + mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + retl + nop + +__hypervisor_flush_tlb_pending: /* 15 insns */ + /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ + sllx %o1, 3, %g1 + mov %o2, %g2 + mov %o0, %g3 +1: sub %g1, (1 << 3), %g1 + ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g3, %o1 /* ARG1: mmu context */ + mov HV_MMU_DMMU, %o2 + andcc %o0, 1, %g0 + movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + andn %o0, 1, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g1, 1b + nop + retl + nop + +__hypervisor_flush_tlb_kernel_range: /* 14 insns */ + /* %o0=start, %o1=end */ + cmp %o0, %o1 + be,pn %xcc, 2f + sethi %hi(PAGE_SIZE), %g3 + mov %o0, %g1 + sub %o1, %g1, %g2 + sub %g2, %g3, %g2 +1: add %g1, %g2, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g2, 1b + sub %g2, %g3, %g2 +2: retl + nop + +#ifdef DCACHE_ALIASING_POSSIBLE + /* XXX Niagara and friends have an 8K cache, so no aliasing is + * XXX possible, but nothing explicit in the Hypervisor API + * XXX guarantees this. 
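+ * XXX For now the stub below simply returns without
+ * XXX flushing anything.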
+ */ +__hypervisor_flush_dcache_page: /* 2 insns */ + retl + nop +#endif + +tlb_patch_one: 1: lduw [%o1], %g1 stw %g1, [%o0] flush %o0 @@ -271,14 +331,14 @@ cheetah_patch_cachetlbops: or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 - call cheetah_patch_one + call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 - call cheetah_patch_one + call tlb_patch_one mov 27, %o2 #ifdef DCACHE_ALIASING_POSSIBLE @@ -286,7 +346,7 @@ cheetah_patch_cachetlbops: or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__cheetah_flush_dcache_page), %o1 or %o1, %lo(__cheetah_flush_dcache_page), %o1 - call cheetah_patch_one + call tlb_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ @@ -309,7 +369,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: +xcall_flush_tlb_mm: /* 18 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -321,9 +381,16 @@ xcall_flush_tlb_mm: stxa %g0, [%g4] ASI_IMMU_DEMAP stxa %g3, [%g2] ASI_DMMU retry + nop + nop + nop + nop + nop + nop + nop .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: +xcall_flush_tlb_pending: /* 20 insns */ /* %g5=context, %g1=nr, %g7=vaddrs[] */ sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 @@ -348,7 +415,7 @@ xcall_flush_tlb_pending: retry .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: +xcall_flush_tlb_kernel_range: /* 22 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -365,6 +432,12 @@ xcall_flush_tlb_kernel_range: retry nop nop + nop + nop + nop + nop + nop + nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. 
 */
@@ -458,6 +531,76 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop + .globl __hypervisor_xcall_flush_tlb_mm +__hypervisor_xcall_flush_tlb_mm: /* 18 insns */ + /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 + mov %o3, %g1 + mov %o5, %g7 + clr %o0 /* ARG0: CPU lists unimplemented */ + clr %o1 /* ARG1: CPU lists unimplemented */ + mov %g5, %o2 /* ARG2: mmu context */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 + mov %g1, %o3 + mov %g7, %o5 + membar #Sync + retry + + .globl __hypervisor_xcall_flush_tlb_pending +__hypervisor_xcall_flush_tlb_pending: /* 18 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */ + sllx %g1, 3, %g1 + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 +1: sub %g1, (1 << 3), %g1 + ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g5, %o1 /* ARG1: mmu context */ + mov HV_MMU_DMMU, %o2 + andcc %o0, 1, %g0 + movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g1, 1b + nop + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 + membar #Sync + retry + + .globl __hypervisor_xcall_flush_tlb_kernel_range +__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ + /* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */ + sethi %hi(PAGE_SIZE - 1), %g2 + or %g2, %lo(PAGE_SIZE - 1), %g2 + andn %g1, %g2, %g1 + andn %g7, %g2, %g7 + sub %g7, %g1, %g3 + add %g2, 1, %g2 + sub %g3, %g2, %g3 + mov %o0, %g2 + mov %o1, %g4 + mov %o2, %g5 +1: add %g1, %g3, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + sethi %hi(PAGE_SIZE), %o2 + brnz,pt %g3, 1b + sub %g3, %o2, %g3 + mov %g2, %o0 + mov %g4, %o1 + mov %g5, %o2 + membar #Sync + retry + /* These just get rescheduled to PIL vectors. 
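 * No sun4v-specific variants are needed for them;
 * hypervisor_patch_cachetlbops below leaves them alone.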
*/ .globl xcall_call_function xcall_call_function: @@ -475,3 +618,64 @@ xcall_capture: retry #endif /* CONFIG_SMP */ + + + .globl hypervisor_patch_cachetlbops +hypervisor_patch_cachetlbops: + save %sp, -128, %sp + + sethi %hi(__flush_tlb_mm), %o0 + or %o0, %lo(__flush_tlb_mm), %o0 + sethi %hi(__hypervisor_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 + call tlb_patch_one + mov 8, %o2 + + sethi %hi(__flush_tlb_pending), %o0 + or %o0, %lo(__flush_tlb_pending), %o0 + sethi %hi(__hypervisor_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 + call tlb_patch_one + mov 15, %o2 + + sethi %hi(__flush_tlb_kernel_range), %o0 + or %o0, %lo(__flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 14, %o2 + +#ifdef DCACHE_ALIASING_POSSIBLE + sethi %hi(__flush_dcache_page), %o0 + or %o0, %lo(__flush_dcache_page), %o0 + sethi %hi(__hypervisor_flush_dcache_page), %o1 + or %o1, %lo(__hypervisor_flush_dcache_page), %o1 + call tlb_patch_one + mov 2, %o2 +#endif /* DCACHE_ALIASING_POSSIBLE */ + +#ifdef CONFIG_SMP + sethi %hi(xcall_flush_tlb_mm), %o0 + or %o0, %lo(xcall_flush_tlb_mm), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 + call tlb_patch_one + mov 18, %o2 + + sethi %hi(xcall_flush_tlb_pending), %o0 + or %o0, %lo(xcall_flush_tlb_pending), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + call tlb_patch_one + mov 18, %o2 + + sethi %hi(xcall_flush_tlb_kernel_range), %o0 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 22, %o2 +#endif /* CONFIG_SMP */ + + ret + restore -- cgit v1.2.3 From a43fe0e789f5445f5224511034f410adf11f153b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:10:53 -0800 Subject: [SPARC64]: Add some hypervisor tlb_type checks. And more consistently check cheetah{,_plus} instead of assuming anything not spitfire is cheetah{,_plus}. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 32 ++++++++++++++++++++++++++------ arch/sparc64/mm/init.c | 6 ++++-- 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 16b8eca9754..aba0f886b05 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -528,6 +528,11 @@ retry: } } +static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) +{ + /* XXX implement me */ +} + /* Send cross call to all processors mentioned in MASK * except self. */ @@ -541,8 +546,10 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d if (tlb_type == spitfire) spitfire_xcall_deliver(data0, data1, data2, mask); - else + else if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_xcall_deliver(data0, data1, data2, mask); + else + hypervisor_xcall_deliver(data0, data1, data2, mask); /* NOTE: Caller runs local copy on master. 
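 * Only the other cpus in the mask receive the cross call.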
*/ put_cpu(); @@ -695,11 +702,17 @@ static __inline__ void __local_flush_dcache_page(struct page *page) void smp_flush_dcache_page_impl(struct page *page, int cpu) { cpumask_t mask = cpumask_of_cpu(cpu); - int this_cpu = get_cpu(); + int this_cpu; + + if (tlb_type == hypervisor) + return; #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif + + this_cpu = get_cpu(); + if (cpu == this_cpu) { __local_flush_dcache_page(page); } else if (cpu_online(cpu)) { @@ -715,7 +728,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) __pa(pg_addr), (u64) pg_addr, mask); - } else { + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { #ifdef DCACHE_ALIASING_POSSIBLE data0 = ((u64)&xcall_flush_dcache_page_cheetah); @@ -737,7 +750,12 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) void *pg_addr = page_address(page); cpumask_t mask = cpu_online_map; u64 data0; - int this_cpu = get_cpu(); + int this_cpu; + + if (tlb_type == hypervisor) + return; + + this_cpu = get_cpu(); cpu_clear(this_cpu, mask); @@ -754,7 +772,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) __pa(pg_addr), (u64) pg_addr, mask); - } else { + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { #ifdef DCACHE_ALIASING_POSSIBLE data0 = ((u64)&xcall_flush_dcache_page_cheetah); cheetah_xcall_deliver(data0, @@ -780,8 +798,10 @@ void smp_receive_signal(int cpu) if (tlb_type == spitfire) spitfire_xcall_deliver(data0, 0, 0, mask); - else + else if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_xcall_deliver(data0, 0, 0, mask); + else if (tlb_type == hypervisor) + hypervisor_xcall_deliver(data0, 0, 0, mask); } } diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1af63307b24..ab50cd9618f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -335,7 +335,7 @@ out: void __kprobes flush_icache_range(unsigned long start, unsigned long end) { - /* Cheetah has coherent I-cache. */ + /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ if (tlb_type == spitfire) { unsigned long kaddr; @@ -372,6 +372,8 @@ void mmu_info(struct seq_file *m) seq_printf(m, "MMU Type\t: Cheetah+\n"); else if (tlb_type == spitfire) seq_printf(m, "MMU Type\t: Spitfire\n"); + else if (tlb_type == hypervisor) + seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); else seq_printf(m, "MMU Type\t: ???\n"); @@ -581,7 +583,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) if (++n >= 512) break; } - } else { + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) -- cgit v1.2.3 From 398d10830843bda7798f71052b54a5341a8ddd53 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 16:41:56 -0800 Subject: [SPARC64]: Niagara optimized memcpy() and copy_{to,from}_user(). Signed-off-by: David S. 
Miller --- arch/sparc64/lib/Makefile | 1 + arch/sparc64/lib/NGcopy_from_user.S | 37 ++++ arch/sparc64/lib/NGcopy_to_user.S | 40 ++++ arch/sparc64/lib/NGmemcpy.S | 364 ++++++++++++++++++++++++++++++++++++ arch/sparc64/lib/NGpatch.S | 32 ++++ 5 files changed, 474 insertions(+) create mode 100644 arch/sparc64/lib/NGcopy_from_user.S create mode 100644 arch/sparc64/lib/NGcopy_to_user.S create mode 100644 arch/sparc64/lib/NGmemcpy.S create mode 100644 arch/sparc64/lib/NGpatch.S (limited to 'arch') diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index c295806500f..813f622b5c4 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile @@ -11,6 +11,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ VISsave.o atomic.o bitops.o \ U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ + NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ copy_in_user.o user_fixup.o memmove.o \ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S new file mode 100644 index 00000000000..2d93456f76d --- /dev/null +++ b/arch/sparc64/lib/NGcopy_from_user.S @@ -0,0 +1,37 @@ +/* NGcopy_from_user.S: Niagara optimized copy from userspace. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ + +#define EX_LD(x) \ +98: x; \ + .section .fixup; \ + .align 4; \ +99: wr %g0, ASI_AIUS, %asi;\ + retl; \ + mov 1, %o0; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME NGcopy_from_user +#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest +#define LOAD_TWIN(addr_reg,dest0,dest1) \ + ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0 +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, memcpy_user_stub; \ + nop +#endif + +#include "NGmemcpy.S" diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S new file mode 100644 index 00000000000..4a12395b450 --- /dev/null +++ b/arch/sparc64/lib/NGcopy_to_user.S @@ -0,0 +1,40 @@ +/* NGcopy_to_user.S: Niagara optimized copy to userspace. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ + +#define EX_ST(x) \ +98: x; \ + .section .fixup; \ + .align 4; \ +99: wr %g0, ASI_AIUS, %asi;\ + retl; \ + mov 1, %o0; \ + .section __ex_table,"a";\ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME NGcopy_to_user +#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. + */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, memcpy_user_stub; \ + nop +#endif + +#include "U3memcpy.S" diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S new file mode 100644 index 00000000000..a39aa3bd434 --- /dev/null +++ b/arch/sparc64/lib/NGmemcpy.S @@ -0,0 +1,364 @@ +/* NGmemcpy.S: Niagara optimized memcpy. + * + * Copyright (C) 2006 David S. 
Miller (davem@davemloft.net) + */ + +#ifdef __KERNEL__ +#include +#define GLOBAL_SPARE %g7 +#define RESTORE_ASI wr %g0, ASI_AIUS, %asi +#else +#define GLOBAL_SPARE %g5 +#define RESTORE_ASI +#endif + +#ifndef STORE_ASI +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#endif + +#ifndef EX_LD +#define EX_LD(x) x +#endif + +#ifndef EX_ST +#define EX_ST(x) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#ifndef MEMCPY_DEBUG +#define LOAD(type,addr,dest) type [addr], dest +#else +#define LOAD(type,addr,dest) type##a [addr] 0x80, dest +#endif +#endif + +#ifndef LOAD_TWIN +#define LOAD_TWIN(addr_reg,dest0,dest1) \ + ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_P, dest0 +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] %asi +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME NGmemcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#ifndef XCC +#define XCC xcc +#endif + + .register %g2,#scratch + .register %g3,#scratch + + .text + .align 64 + + .globl FUNC_NAME + .type FUNC_NAME,#function +FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, GLOBAL_SPARE + cmp %o2, 0 + be,pn %XCC, 85f + or %o0, %o1, %o3 + cmp %o2, 16 + blu,a,pn %XCC, 80f + or %o3, %o2, %o3 + + /* 2 blocks (128 bytes) is the minimum we can do the block + * copy with. We need to ensure that we'll iterate at least + * once in the block copy loop. At worst we'll need to align + * the destination to a 64-byte boundary which can chew up + * to (64 - 1) bytes from the length before we perform the + * block copy loop. + */ + cmp %o2, (2 * 64) + blu,pt %XCC, 70f + andcc %o3, 0x7, %g0 + + /* %o0: dst + * %o1: src + * %o2: len (known to be >= 128) + * + * The block copy loops will use %o4/%o5,%g2/%g3 as + * temporaries while copying the data. + */ + + LOAD(prefetch, %o1, #one_read) + wr %g0, STORE_ASI, %asi + + /* Align destination on 64-byte boundary. */ + andcc %o0, (64 - 1), %o4 + be,pt %XCC, 2f + sub %o4, 64, %o4 + sub %g0, %o4, %o4 ! bytes to align dst + sub %o2, %o4, %o2 +1: subcc %o4, 1, %o4 + EX_LD(LOAD(ldub, %o1, %g1)) + EX_ST(STORE(stb, %g1, %o0)) + add %o1, 1, %o1 + bne,pt %XCC, 1b + add %o0, 1, %o0 + + /* If the source is on a 16-byte boundary we can do + * the direct block copy loop. If it is 8-byte aligned + * we can do the 16-byte loads offset by -8 bytes and the + * init stores offset by one register. + * + * If the source is not even 8-byte aligned, we need to do + * shifting and masking (basically integer faligndata). + * + * The careful bit with init stores is that if we store + * to any part of the cache line we have to store the whole + * cacheline else we can end up with corrupt L2 cache line + * contents. Since the loop works on 64-bytes of 64-byte + * aligned store data at a time, this is easy to ensure. + */ +2: + andcc %o1, (16 - 1), %o4 + andn %o2, (64 - 1), %g1 ! block copy loop iterator + sub %o2, %g1, %o2 ! final sub-block copy bytes + be,pt %XCC, 50f + cmp %o4, 8 + be,a,pt %XCC, 10f + sub %o1, 0x8, %o1 + + /* Neither 8-byte nor 16-byte aligned, shift and mask. 
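+ * Each destination doubleword is assembled from two
+ * overlapping source doublewords with sllx/srlx and or
+ * (see SWIVEL_ONE_DWORD below).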
*/ + mov %g1, %o4 + and %o1, 0x7, %g1 + sll %g1, 3, %g1 + mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2)) + sub %o3, %g1, %o3 + sllx %g2, %g1, %g2 + +#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\ + EX_LD(LOAD(ldx, SRC, TMP1)); \ + srlx TMP1, PRE_SHIFT, TMP2; \ + or TMP2, PRE_VAL, TMP2; \ + EX_ST(STORE_INIT(TMP2, DST)); \ + sllx TMP1, POST_SHIFT, PRE_VAL; + +1: add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10) + add %o1, 0x8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18) + add %o1, 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32 - 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30) + add %o1, 8, %o1 + SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38) + subcc %o4, 64, %o4 + bne,pt %XCC, 1b + add %o0, 64, %o0 + +#undef SWIVEL_ONE_DWORD + + srl %g1, 3, %g1 + ba,pt %XCC, 60f + add %o1, %g1, %o1 + +10: /* Destination is 64-byte aligned, source was only 8-byte + * aligned but it has been subtracted by 8 and we perform + * one twin load ahead, then add 8 back into source when + * we finish the loop. + */ + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) +1: add %o1, 16, %o1 + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 + EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line + EX_ST(STORE_INIT(%g2, %o0 + 0x08)) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%g3, %o0 + 0x10)) + EX_ST(STORE_INIT(%o4, %o0 + 0x18)) + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%o5, %o0 + 0x20)) + EX_ST(STORE_INIT(%g2, %o0 + 0x28)) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + EX_ST(STORE_INIT(%g3, %o0 + 0x30)) + EX_ST(STORE_INIT(%o4, %o0 + 0x38)) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + + ba,pt %XCC, 60f + add %o1, 0x8, %o1 + +50: /* Destination is 64-byte aligned, and source is 16-byte + * aligned. + */ +1: EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16 + 32, %o1 + LOAD(prefetch, %o1, #one_read) + sub %o1, 32, %o1 + EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line + EX_ST(STORE_INIT(%o5, %o0 + 0x08)) + EX_LD(LOAD_TWIN(%o1, %o4, %o5)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%g2, %o0 + 0x10)) + EX_ST(STORE_INIT(%g3, %o0 + 0x18)) + EX_LD(LOAD_TWIN(%o1, %g2, %g3)) + add %o1, 16, %o1 + EX_ST(STORE_INIT(%o4, %o0 + 0x20)) + EX_ST(STORE_INIT(%o5, %o0 + 0x28)) + EX_ST(STORE_INIT(%g2, %o0 + 0x30)) + EX_ST(STORE_INIT(%g3, %o0 + 0x38)) + subcc %g1, 64, %g1 + bne,pt %XCC, 1b + add %o0, 64, %o0 + /* fall through */ + +60: + /* %o2 contains any final bytes still needed to be copied + * over. If anything is left, we copy it one byte at a time. 
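+ * The byte-at-a-time loop is at label 90 at the end of
+ * this file.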
+ */ + RESTORE_ASI + brz,pt %o2, 85f + sub %o0, %o1, %o3 + ba,a,pt %XCC, 90f + + .align 64 +70: /* 16 < len <= 64 */ + bne,pn %XCC, 75f + sub %o0, %o1, %o3 + +72: + andn %o2, 0xf, %o4 + and %o2, 0xf, %o2 +1: subcc %o4, 0x10, %o4 + EX_LD(LOAD(ldx, %o1, %o5)) + add %o1, 0x08, %o1 + EX_LD(LOAD(ldx, %o1, %g1)) + sub %o1, 0x08, %o1 + EX_ST(STORE(stx, %o5, %o1 + %o3)) + add %o1, 0x8, %o1 + EX_ST(STORE(stx, %g1, %o1 + %o3)) + bgu,pt %XCC, 1b + add %o1, 0x8, %o1 +73: andcc %o2, 0x8, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x8, %o2 + EX_LD(LOAD(ldx, %o1, %o5)) + EX_ST(STORE(stx, %o5, %o1 + %o3)) + add %o1, 0x8, %o1 +1: andcc %o2, 0x4, %g0 + be,pt %XCC, 1f + nop + sub %o2, 0x4, %o2 + EX_LD(LOAD(lduw, %o1, %o5)) + EX_ST(STORE(stw, %o5, %o1 + %o3)) + add %o1, 0x4, %o1 +1: cmp %o2, 0 + be,pt %XCC, 85f + nop + ba,pt %xcc, 90f + nop + +75: + andcc %o0, 0x7, %g1 + sub %g1, 0x8, %g1 + be,pn %icc, 2f + sub %g0, %g1, %g1 + sub %o2, %g1, %o2 + +1: subcc %g1, 1, %g1 + EX_LD(LOAD(ldub, %o1, %o5)) + EX_ST(STORE(stb, %o5, %o1 + %o3)) + bgu,pt %icc, 1b + add %o1, 1, %o1 + +2: add %o1, %o3, %o0 + andcc %o1, 0x7, %g1 + bne,pt %icc, 8f + sll %g1, 3, %g1 + + cmp %o2, 16 + bgeu,pt %icc, 72b + nop + ba,a,pt %xcc, 73b + +8: mov 64, %o3 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1, %g2)) + sub %o3, %g1, %o3 + andn %o2, 0x7, %o4 + sllx %g2, %g1, %g2 +1: add %o1, 0x8, %o1 + EX_LD(LOAD(ldx, %o1, %g3)) + subcc %o4, 0x8, %o4 + srlx %g3, %o3, %o5 + or %o5, %g2, %o5 + EX_ST(STORE(stx, %o5, %o0)) + add %o0, 0x8, %o0 + bgu,pt %icc, 1b + sllx %g3, %g1, %g2 + + srl %g1, 3, %g1 + andcc %o2, 0x7, %o2 + be,pn %icc, 85f + add %o1, %g1, %o1 + ba,pt %xcc, 90f + sub %o0, %o1, %o3 + + .align 64 +80: /* 0 < len <= 16 */ + andcc %o3, 0x3, %g0 + bne,pn %XCC, 90f + sub %o0, %o1, %o3 + +1: + subcc %o2, 4, %o2 + EX_LD(LOAD(lduw, %o1, %g1)) + EX_ST(STORE(stw, %g1, %o1 + %o3)) + bgu,pt %XCC, 1b + add %o1, 4, %o1 + +85: retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 + + .align 32 +90: + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %g1)) + EX_ST(STORE(stb, %g1, %o1 + %o3)) + bgu,pt %XCC, 90b + add %o1, 1, %o1 + retl + mov EX_RETVAL(GLOBAL_SPARE), %o0 + + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc64/lib/NGpatch.S b/arch/sparc64/lib/NGpatch.S new file mode 100644 index 00000000000..f13ec9e4c8a --- /dev/null +++ b/arch/sparc64/lib/NGpatch.S @@ -0,0 +1,32 @@ +/* NGpatch.S: Patch Ultra-I routines with Niagara variant. + * + * Copyright (C) 2006 David S. Miller + */ + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + srl %g1, 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_copyops + .type niagara_patch_copyops,#function +niagara_patch_copyops: + NG_DO_PATCH(memcpy, NGmemcpy) + NG_DO_PATCH(___copy_from_user, NGcopy_from_user) + NG_DO_PATCH(___copy_to_user, NGcopy_to_user) + retl + nop + .size niagara_patch_copyops,.-niagara_patch_copyops -- cgit v1.2.3 From d96b81533ba3d5775e45aee6986b2aa33c10801c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 15:40:53 -0800 Subject: [SPARC64]: Add sun4v case to __GET_CPUID() patch tables. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/setup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index f751d11926b..2918ed3eb1b 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -520,6 +520,9 @@ static void __init per_cpu_patch(void) else insns = &p->cheetah_safari[0]; break; + case hypervisor: + insns = &p->sun4v[0]; + break; default: prom_printf("Unknown cpu type, halting.\n"); prom_halt(); -- cgit v1.2.3 From 6e02493a7f33ac89e698b980a657d77ab2749eaf Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 20:47:26 -0800 Subject: [SPARC64]: Fill dead cycles on trap entry with real work. As we save trap state onto the stack, the store buffer fills up mid-way through and we stall for several cycles as the store buffer trickles out to the L2 cache. Meanwhile we can do some privileged register reads and other calculations, essentially for free. Signed-off-by: David S. Miller --- arch/sparc64/kernel/etrap.S | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 4a0e01b1404..f2556146a73 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -98,37 +98,40 @@ etrap_save: save %g2, -STACK_BIAS, %sp stxa %g3, [%l4] ASI_DMMU sethi %hi(KERNBASE), %l4 flush %l4 - wr %g0, ASI_AIUS, %asi -2: wrpr %g0, 0x0, %tl - mov %g4, %l4 + mov ASI_AIUS, %l7 +2: mov %g4, %l4 mov %g5, %l5 - - mov %g7, %l2 + add %g7, 4, %l2 wrpr %g0, ETRAP_PSTATE1, %pstate stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] + sllx %l7, 24, %l7 stx %g3, [%sp + PTREGS_OFF + PT_V9_G3] + rdpr %cwp, %l0 stx %g4, [%sp + PTREGS_OFF + PT_V9_G4] stx %g5, [%sp + PTREGS_OFF + PT_V9_G5] stx %g6, [%sp + PTREGS_OFF + PT_V9_G6] - stx %g7, [%sp + PTREGS_OFF + PT_V9_G7] + or %l7, %l0, %l7 + sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0 + or %l7, %l0, %l7 + wrpr %l2, %tnpc + wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate stx %i0, [%sp + PTREGS_OFF + PT_V9_I0] stx %i1, [%sp + PTREGS_OFF + PT_V9_I1] stx %i2, [%sp + PTREGS_OFF + PT_V9_I2] stx %i3, [%sp + PTREGS_OFF + PT_V9_I3] stx %i4, [%sp + PTREGS_OFF + PT_V9_I4] stx %i5, [%sp + PTREGS_OFF + PT_V9_I5] - stx %i6, [%sp + PTREGS_OFF + PT_V9_I6] - stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] - wrpr %g0, ETRAP_PSTATE2, %pstate mov %l6, %g6 + stx %i7, [%sp + PTREGS_OFF + PT_V9_I7] LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1) - jmpl %l2 + 0x4, %g0 - ldx [%g6 + TI_TASK], %g4 + ldx [%g6 + TI_TASK], %g4 + done -3: ldub [%l6 + TI_FPDEPTH], %l5 +3: mov ASI_P, %l7 + ldub [%l6 + TI_FPDEPTH], %l5 add %l6, TI_FPSAVED + 1, %l4 srl %l5, 1, %l3 add %l5, 2, %l5 -- cgit v1.2.3 From 936f482af1743141d637483ec10eb881537c26dc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 21:29:28 -0800 Subject: [SPARC64]: Add initial code to twiddle %gl on trap entry/exit. Instead of setting/clearing PSTATE_AG we have to change the %gl register value on sun4v. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/etrap.S | 17 +++++++++++++++-- arch/sparc64/kernel/rtrap.S | 16 +++++++++++++++- arch/sparc64/kernel/setup.c | 20 ++++++++++++++++++++ arch/sparc64/kernel/vmlinux.lds.S | 3 +++ 4 files changed, 53 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index f2556146a73..4d644949ad4 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -102,7 +102,14 @@ etrap_save: save %g2, -STACK_BIAS, %sp 2: mov %g4, %l4 mov %g5, %l5 add %g7, 4, %l2 - wrpr %g0, ETRAP_PSTATE1, %pstate + + /* Go to trap time globals so we can save them. */ +661: wrpr %g0, ETRAP_PSTATE1, %pstate + .section .gl_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + stx %g1, [%sp + PTREGS_OFF + PT_V9_G1] stx %g2, [%sp + PTREGS_OFF + PT_V9_G2] sllx %l7, 24, %l7 @@ -195,9 +202,15 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. rdpr %tt, %g3 stx %g3, [%g2 + STACK_BIAS + 0x78] - wrpr %g1, %tl stx %g1, [%g2 + STACK_BIAS + 0x80] + wrpr %g0, 1, %tl +661: nop + .section .gl_1insn_patch, "ax" + .word 661b + SET_GL(1) + .previous + rdpr %tstate, %g1 sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2 ba,pt %xcc, 1b diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index ecfbbdc5612..e6130956307 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -230,7 +230,14 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 1: ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6 ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7 - wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate + + /* Normal globals are restored, go to trap globals. */ +661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate + .section .gl_1insn_patch, "ax" + .word 661b + SET_GL(1) + .previous + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 @@ -304,6 +311,13 @@ user_rtt_fill_fixup: mov %g6, %l1 wrpr %g0, 0x0, %tl wrpr %g0, RTRAP_PSTATE, %pstate + +661: nop + .section .gl_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + mov %l1, %g6 ldx [%g6 + TI_TASK], %g4 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 2918ed3eb1b..aaab319ad88 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -545,6 +545,24 @@ static void __init per_cpu_patch(void) #endif } +static void __init gl_patch(void) +{ + struct gl_1insn_patch_entry *p; + + if (tlb_type != hypervisor) + return; + + p = &__gl_1insn_patch; + while (p < &__gl_1insn_patch_end) { + unsigned long addr = p->addr; + + *(unsigned int *) (addr + 0) = p->insn; + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + p++; + } +} + void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ @@ -567,6 +585,8 @@ void __init setup_arch(char **cmdline_p) */ per_cpu_patch(); + gl_patch(); + boot_flags_init(*cmdline_p); idprom_init(); diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 1639d9c935c..482d1ed87f4 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -77,6 +77,9 @@ SECTIONS __cpuid_patch = .; .cpuid_patch : { *(.cpuid_patch) } __cpuid_patch_end = .; + __gl_1insn_patch = .; + .gl_1insn_patch : { *(.gl_1insn_patch) } + __gl_1insn_patch_end = .; . = ALIGN(8192); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } -- cgit v1.2.3 From 314981ac7177a933319e3c071a5cf0a579205e6e Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sun, 5 Feb 2006 21:59:03 -0800 Subject: [SPARC64]: Kill all %pstate changes in context switch code. They are totally unnecessary because: 1) Interrupts are already disabled when switch_to() runs. 2) We don't use hard-coded alternate globals any longer. This found a case in rtrap, which still assumed alternate global %g6 was current_thread_info(), and that is fixed by this changeset as well. Signed-off-by: David S. Miller --- arch/sparc64/kernel/rtrap.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index e6130956307..a2fa277da62 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -224,7 +224,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4 ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5 brz,pt %l3, 1f - nop + mov %g6, %l2 + /* Must do this before thread reg is clobbered below. */ LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2) 1: @@ -238,6 +239,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 SET_GL(1) .previous + mov %l2, %g6 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0 ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1 -- cgit v1.2.3 From 45fec05f805a113372c9a7ff4c653ac749f6921c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 22:27:28 -0800 Subject: [SPARC64]: Sanitize %pstate writes for sun4v. If we're just switching between different alternate global sets, nop it out on sun4v. Also, get rid of all of the alternate global save/restore in the OBP CIF trampoline code. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 18 +++- arch/sparc64/kernel/setup.c | 26 +++-- arch/sparc64/kernel/tsb.S | 12 ++- arch/sparc64/kernel/vmlinux.lds.S | 3 + arch/sparc64/mm/ultra.S | 18 +++- arch/sparc64/prom/cif.S | 211 +++----------------------------------- 6 files changed, 82 insertions(+), 206 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 9b415ab6db6..c1335432124 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -60,8 +60,15 @@ kvmap_itlb_load: retry kvmap_itlb_longpath: - rdpr %pstate, %g5 + +661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %tpc, %g5 ba,pt %xcc, sparc64_realfault_common mov FAULT_CODE_ITLB, %g4 @@ -161,8 +168,15 @@ kvmap_check_obp: nop kvmap_dtlb_longpath: - rdpr %pstate, %g5 + +661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %tl, %g4 cmp %g4, 1 mov TLB_TAG_ACCESS, %g4 diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index aaab319ad88..e22bf5fc92c 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -547,19 +547,33 @@ static void __init per_cpu_patch(void) static void __init gl_patch(void) { - struct gl_1insn_patch_entry *p; + struct gl_1insn_patch_entry *p1; + struct gl_2insn_patch_entry *p2; if (tlb_type != hypervisor) return; - p = &__gl_1insn_patch; - while (p < &__gl_1insn_patch_end) { - unsigned long addr = p->addr; + p1 = &__gl_1insn_patch; + while (p1 < &__gl_1insn_patch_end) { + unsigned long addr = p1->addr; - *(unsigned int *) (addr + 0) = p->insn; + *(unsigned int *) (addr + 0) = p1->insn; __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - p++; + p1++; + } + + p2 = &__gl_2insn_patch; + while (p2 < &__gl_2insn_patch_end) { + unsigned long addr = p2->addr; + + 
*(unsigned int *) (addr + 0) = p2->insns[0]; + __asm__ __volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 3) = p2->insns[1]; + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + p2++; } } diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 3b45db98005..96e63168d8b 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -82,9 +82,17 @@ tsb_itlb_load: .globl tsb_do_fault tsb_do_fault: cmp %g3, FAULT_CODE_DTLB - rdpr %pstate, %g5 + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + bne,pn %xcc, tsb_do_itlb_fault - wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + nop tsb_do_dtlb_fault: rdpr %tl, %g4 diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 482d1ed87f4..686bf6b3b03 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -80,6 +80,9 @@ SECTIONS __gl_1insn_patch = .; .gl_1insn_patch : { *(.gl_1insn_patch) } __gl_1insn_patch_end = .; + __gl_2insn_patch = .; + .gl_2insn_patch : { *(.gl_2insn_patch) } + __gl_2insn_patch_end = .; . = ALIGN(8192); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index cac58d66fca..5dd86ad0d29 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -444,8 +444,15 @@ xcall_flush_tlb_kernel_range: /* 22 insns */ */ .globl xcall_sync_tick xcall_sync_tick: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 @@ -468,8 +475,15 @@ xcall_sync_tick: */ .globl xcall_report_regs xcall_report_regs: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 diff --git a/arch/sparc64/prom/cif.S b/arch/sparc64/prom/cif.S index 29d0ae74aed..5f27ad779c0 100644 --- a/arch/sparc64/prom/cif.S +++ b/arch/sparc64/prom/cif.S @@ -1,10 +1,12 @@ /* cif.S: PROM entry/exit assembler trampolines. * - * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - * Copyright (C) 2005 David S. Miller + * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 2005, 2006 David S. Miller */ #include +#include +#include .text .globl prom_cif_interface @@ -12,78 +14,16 @@ prom_cif_interface: sethi %hi(p1275buf), %o0 or %o0, %lo(p1275buf), %o0 ldx [%o0 + 0x010], %o1 ! prom_cif_stack - save %o1, -0x190, %sp + save %o1, -192, %sp ldx [%i0 + 0x008], %l2 ! prom_cif_handler - rdpr %pstate, %l4 - wrpr %g0, 0x15, %pstate ! save alternate globals - stx %g1, [%sp + 2047 + 0x0b0] - stx %g2, [%sp + 2047 + 0x0b8] - stx %g3, [%sp + 2047 + 0x0c0] - stx %g4, [%sp + 2047 + 0x0c8] - stx %g5, [%sp + 2047 + 0x0d0] - stx %g6, [%sp + 2047 + 0x0d8] - stx %g7, [%sp + 2047 + 0x0e0] - wrpr %g0, 0x814, %pstate ! save interrupt globals - stx %g1, [%sp + 2047 + 0x0e8] - stx %g2, [%sp + 2047 + 0x0f0] - stx %g3, [%sp + 2047 + 0x0f8] - stx %g4, [%sp + 2047 + 0x100] - stx %g5, [%sp + 2047 + 0x108] - stx %g6, [%sp + 2047 + 0x110] - stx %g7, [%sp + 2047 + 0x118] - wrpr %g0, 0x14, %pstate ! 
save normal globals - stx %g1, [%sp + 2047 + 0x120] - stx %g2, [%sp + 2047 + 0x128] - stx %g3, [%sp + 2047 + 0x130] - stx %g4, [%sp + 2047 + 0x138] - stx %g5, [%sp + 2047 + 0x140] - stx %g6, [%sp + 2047 + 0x148] - stx %g7, [%sp + 2047 + 0x150] - wrpr %g0, 0x414, %pstate ! save mmu globals - stx %g1, [%sp + 2047 + 0x158] - stx %g2, [%sp + 2047 + 0x160] - stx %g3, [%sp + 2047 + 0x168] - stx %g4, [%sp + 2047 + 0x170] - stx %g5, [%sp + 2047 + 0x178] - stx %g6, [%sp + 2047 + 0x180] - stx %g7, [%sp + 2047 + 0x188] - mov %g1, %l0 ! also save to locals, so we can handle - mov %g2, %l1 ! tlb faults later on, when accessing - mov %g3, %l3 ! the stack. - mov %g7, %l5 - wrpr %l4, PSTATE_IE, %pstate ! turn off interrupts + mov %g4, %l0 + mov %g5, %l1 + mov %g6, %l3 call %l2 add %i0, 0x018, %o0 ! prom_args - wrpr %g0, 0x414, %pstate ! restore mmu globals - mov %l0, %g1 - mov %l1, %g2 - mov %l3, %g3 - mov %l5, %g7 - wrpr %g0, 0x14, %pstate ! restore normal globals - ldx [%sp + 2047 + 0x120], %g1 - ldx [%sp + 2047 + 0x128], %g2 - ldx [%sp + 2047 + 0x130], %g3 - ldx [%sp + 2047 + 0x138], %g4 - ldx [%sp + 2047 + 0x140], %g5 - ldx [%sp + 2047 + 0x148], %g6 - ldx [%sp + 2047 + 0x150], %g7 - wrpr %g0, 0x814, %pstate ! restore interrupt globals - ldx [%sp + 2047 + 0x0e8], %g1 - ldx [%sp + 2047 + 0x0f0], %g2 - ldx [%sp + 2047 + 0x0f8], %g3 - ldx [%sp + 2047 + 0x100], %g4 - ldx [%sp + 2047 + 0x108], %g5 - ldx [%sp + 2047 + 0x110], %g6 - ldx [%sp + 2047 + 0x118], %g7 - wrpr %g0, 0x15, %pstate ! restore alternate globals - ldx [%sp + 2047 + 0x0b0], %g1 - ldx [%sp + 2047 + 0x0b8], %g2 - ldx [%sp + 2047 + 0x0c0], %g3 - ldx [%sp + 2047 + 0x0c8], %g4 - ldx [%sp + 2047 + 0x0d0], %g5 - ldx [%sp + 2047 + 0x0d8], %g6 - ldx [%sp + 2047 + 0x0e0], %g7 - wrpr %l4, 0, %pstate ! restore original pstate + mov %l0, %g4 + mov %l1, %g5 + mov %l3, %g6 ret restore @@ -91,135 +31,18 @@ prom_cif_interface: prom_cif_callback: sethi %hi(p1275buf), %o1 or %o1, %lo(p1275buf), %o1 - save %sp, -0x270, %sp - rdpr %pstate, %l4 - wrpr %g0, 0x15, %pstate ! save PROM alternate globals - stx %g1, [%sp + 2047 + 0x0b0] - stx %g2, [%sp + 2047 + 0x0b8] - stx %g3, [%sp + 2047 + 0x0c0] - stx %g4, [%sp + 2047 + 0x0c8] - stx %g5, [%sp + 2047 + 0x0d0] - stx %g6, [%sp + 2047 + 0x0d8] - stx %g7, [%sp + 2047 + 0x0e0] - ! restore Linux alternate globals - ldx [%sp + 2047 + 0x190], %g1 - ldx [%sp + 2047 + 0x198], %g2 - ldx [%sp + 2047 + 0x1a0], %g3 - ldx [%sp + 2047 + 0x1a8], %g4 - ldx [%sp + 2047 + 0x1b0], %g5 - ldx [%sp + 2047 + 0x1b8], %g6 - ldx [%sp + 2047 + 0x1c0], %g7 - wrpr %g0, 0x814, %pstate ! save PROM interrupt globals - stx %g1, [%sp + 2047 + 0x0e8] - stx %g2, [%sp + 2047 + 0x0f0] - stx %g3, [%sp + 2047 + 0x0f8] - stx %g4, [%sp + 2047 + 0x100] - stx %g5, [%sp + 2047 + 0x108] - stx %g6, [%sp + 2047 + 0x110] - stx %g7, [%sp + 2047 + 0x118] - ! restore Linux interrupt globals - ldx [%sp + 2047 + 0x1c8], %g1 - ldx [%sp + 2047 + 0x1d0], %g2 - ldx [%sp + 2047 + 0x1d8], %g3 - ldx [%sp + 2047 + 0x1e0], %g4 - ldx [%sp + 2047 + 0x1e8], %g5 - ldx [%sp + 2047 + 0x1f0], %g6 - ldx [%sp + 2047 + 0x1f8], %g7 - wrpr %g0, 0x14, %pstate ! save PROM normal globals - stx %g1, [%sp + 2047 + 0x120] - stx %g2, [%sp + 2047 + 0x128] - stx %g3, [%sp + 2047 + 0x130] - stx %g4, [%sp + 2047 + 0x138] - stx %g5, [%sp + 2047 + 0x140] - stx %g6, [%sp + 2047 + 0x148] - stx %g7, [%sp + 2047 + 0x150] - ! 
restore Linux normal globals - ldx [%sp + 2047 + 0x200], %g1 - ldx [%sp + 2047 + 0x208], %g2 - ldx [%sp + 2047 + 0x210], %g3 - ldx [%sp + 2047 + 0x218], %g4 - ldx [%sp + 2047 + 0x220], %g5 - ldx [%sp + 2047 + 0x228], %g6 - ldx [%sp + 2047 + 0x230], %g7 - wrpr %g0, 0x414, %pstate ! save PROM mmu globals - stx %g1, [%sp + 2047 + 0x158] - stx %g2, [%sp + 2047 + 0x160] - stx %g3, [%sp + 2047 + 0x168] - stx %g4, [%sp + 2047 + 0x170] - stx %g5, [%sp + 2047 + 0x178] - stx %g6, [%sp + 2047 + 0x180] - stx %g7, [%sp + 2047 + 0x188] - ! restore Linux mmu globals - ldx [%sp + 2047 + 0x238], %o0 - ldx [%sp + 2047 + 0x240], %o1 - ldx [%sp + 2047 + 0x248], %l2 - ldx [%sp + 2047 + 0x250], %l3 - ldx [%sp + 2047 + 0x258], %l5 - ldx [%sp + 2047 + 0x260], %l6 - ldx [%sp + 2047 + 0x268], %l7 - ! switch to Linux tba - sethi %hi(sparc64_ttable_tl0), %l1 - rdpr %tba, %l0 ! save PROM tba - mov %o0, %g1 - mov %o1, %g2 - mov %l2, %g3 - mov %l3, %g4 - mov %l5, %g5 - mov %l6, %g6 - mov %l7, %g7 - wrpr %l1, %tba ! install Linux tba - wrpr %l4, 0, %pstate ! restore PSTATE + save %sp, -192, %sp + TRAP_LOAD_THREAD_REG(%g6, %g1) + LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %o0) + ldx [%g6 + TI_TASK], %g4 call prom_world - mov %g0, %o0 + mov 0, %o0 ldx [%i1 + 0x000], %l2 call %l2 mov %i0, %o0 mov %o0, %l1 call prom_world - or %g0, 1, %o0 - wrpr %g0, 0x14, %pstate ! interrupts off - ! restore PROM mmu globals - ldx [%sp + 2047 + 0x158], %o0 - ldx [%sp + 2047 + 0x160], %o1 - ldx [%sp + 2047 + 0x168], %l2 - ldx [%sp + 2047 + 0x170], %l3 - ldx [%sp + 2047 + 0x178], %l5 - ldx [%sp + 2047 + 0x180], %l6 - ldx [%sp + 2047 + 0x188], %l7 - wrpr %g0, 0x414, %pstate ! restore PROM mmu globals - mov %o0, %g1 - mov %o1, %g2 - mov %l2, %g3 - mov %l3, %g4 - mov %l5, %g5 - mov %l6, %g6 - mov %l7, %g7 - wrpr %l0, %tba ! restore PROM tba - wrpr %g0, 0x14, %pstate ! restore PROM normal globals - ldx [%sp + 2047 + 0x120], %g1 - ldx [%sp + 2047 + 0x128], %g2 - ldx [%sp + 2047 + 0x130], %g3 - ldx [%sp + 2047 + 0x138], %g4 - ldx [%sp + 2047 + 0x140], %g5 - ldx [%sp + 2047 + 0x148], %g6 - ldx [%sp + 2047 + 0x150], %g7 - wrpr %g0, 0x814, %pstate ! restore PROM interrupt globals - ldx [%sp + 2047 + 0x0e8], %g1 - ldx [%sp + 2047 + 0x0f0], %g2 - ldx [%sp + 2047 + 0x0f8], %g3 - ldx [%sp + 2047 + 0x100], %g4 - ldx [%sp + 2047 + 0x108], %g5 - ldx [%sp + 2047 + 0x110], %g6 - ldx [%sp + 2047 + 0x118], %g7 - wrpr %g0, 0x15, %pstate ! restore PROM alternate globals - ldx [%sp + 2047 + 0x0b0], %g1 - ldx [%sp + 2047 + 0x0b8], %g2 - ldx [%sp + 2047 + 0x0c0], %g3 - ldx [%sp + 2047 + 0x0c8], %g4 - ldx [%sp + 2047 + 0x0d0], %g5 - ldx [%sp + 2047 + 0x0d8], %g6 - ldx [%sp + 2047 + 0x0e0], %g7 - wrpr %l4, 0, %pstate + mov 1, %o0 ret restore %l1, 0, %o0 -- cgit v1.2.3 From 840aaef8db32572b6d11e0d5cb5e6efcbc812000 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Feb 2006 15:52:05 -0800 Subject: [SPARC64]: Add missing memory barriers to instruction patching functions. V9 requires a write memory barrier before the instruction flush. Signed-off-by: David S. Miller
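The barrier requirement reduces to a three-step idiom: store the new instruction word, order the store, then flush. The helper below is an illustrative sketch only, not code from the patch; patch_text_word is an invented name, and wmb() is written out as the #StoreStore membar it amounts to on sparc64:

static void patch_text_word(unsigned long addr, unsigned int insn)
{
	*(unsigned int *) addr = insn;	/* store the new instruction word */
	__asm__ __volatile__("membar #StoreStore"	/* wmb(): make the store visible... */
			     : /* no outputs */
			     : /* no inputs */
			     : "memory");
	__asm__ __volatile__("flush %0"	/* ...before the I-cache flush */
			     : /* no outputs */
			     : "r" (addr));
}

Each store/flush pair touched by the hunks below follows this pattern.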
--- arch/sparc64/kernel/setup.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index e22bf5fc92c..40acac5b833 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -529,15 +529,19 @@ static void __init per_cpu_patch(void) }; *(unsigned int *) (addr + 0) = insns[0]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); *(unsigned int *) (addr + 4) = insns[1]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 4)); *(unsigned int *) (addr + 8) = insns[2]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 8)); *(unsigned int *) (addr + 12) = insns[3]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 12)); p++; @@ -558,6 +562,7 @@ static void __init gl_patch(void) unsigned long addr = p1->addr; *(unsigned int *) (addr + 0) = p1->insn; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); p1++; @@ -568,9 +573,11 @@ static void __init gl_patch(void) unsigned long addr = p2->addr; *(unsigned int *) (addr + 0) = p2->insns[0]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); *(unsigned int *) (addr + 4) = p2->insns[1]; + wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 4)); p2++; -- cgit v1.2.3 From d257d5da39a78b32721ca84b2ba7f461f2f7ed7f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Feb 2006 23:44:37 -0800 Subject: [SPARC64]: Initial sun4v TLB miss handling infrastructure. Things are a little tricky because, unlike sun4u, we have to: 1) do a hypervisor trap to do the TLB load. 2) do the TSB lookup calculations by hand Signed-off-by: David S. Miller --- arch/sparc64/kernel/head.S | 1 + arch/sparc64/kernel/ktlb.S | 12 +- arch/sparc64/kernel/sun4v_tlb_miss.S | 219 +++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/tsb.S | 89 +++++++++++--- arch/sparc64/kernel/vmlinux.lds.S | 3 + arch/sparc64/mm/init.c | 24 +++- 6 files changed, 332 insertions(+), 16 deletions(-) create mode 100644 arch/sparc64/kernel/sun4v_tlb_miss.S (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 7840271d7aa..03fc0b5b1d9 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -474,6 +474,7 @@ setup_tba: sparc64_boot_end: #include "systbls.S" +#include "sun4v_tlb_miss.S" #include "ktlb.S" #include "tsb.S" #include "etrap.S" diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index c1335432124..2e55084a0c1 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -16,12 +16,16 @@ .text .align 32 - .globl kvmap_itlb kvmap_itlb: /* g6: TAG TARGET */ mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_IMMU, %g4 + /* sun4v_itlb_miss branches here with the missing virtual + * address already loaded into %g4 + */ +kvmap_itlb_4v: + kvmap_itlb_nonlinear: /* Catch kernel NULL pointer calls. */ sethi %hi(PAGE_SIZE), %g5 @@ -94,11 +98,15 @@ kvmap_dtlb_obp: nop .align 32 - .globl kvmap_dtlb kvmap_dtlb: /* %g6: TAG TARGET */ mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g4 + + /* sun4v_dtlb_miss branches here with the missing virtual + * address already loaded into %g4 + */ +kvmap_dtlb_4v: brgez,pn %g4, kvmap_dtlb_nonlinear nop diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S new file mode 100644 index 00000000000..58ea5dd8573 --- /dev/null +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -0,0 +1,219 @@ +/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
+ * + * Copyright (C) 2006 + */ + + .text + .align 32 + +sun4v_itlb_miss: + /* Load CPU ID into %g3. */ + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + + /* Load UTSB reg into %g1. */ + ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 + + /* Load &trap_block[smp_processor_id()] into %g2. */ + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + + /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. + * Branch if kernel TLB miss. The kernel TSB and user TSB miss + * code wants the missing virtual address in %g4, so that value + * cannot be modified through the entirety of this handler. + */ + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + srlx %g4, 22, %g3 + sllx %g5, 48, %g6 + or %g6, %g3, %g6 + brz,pn %g5, kvmap_itlb_4v + nop + + /* Create TSB pointer. This is something like: + * + * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; + * tsb_base = tsb_reg & ~0x7UL; + */ + and %g1, 0x7, %g3 + andn %g1, 0x7, %g1 + mov 512, %g7 + sllx %g7, %g3, %g7 + sub %g7, 1, %g7 + + /* TSB index mask is in %g7, tsb base is in %g1. Compute + * the TSB entry pointer into %g1: + * + * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); + * tsb_ptr = tsb_base + (tsb_index * 16); + */ + srlx %g4, PAGE_SHIFT, %g3 + and %g3, %g7, %g3 + sllx %g3, 4, %g3 + add %g1, %g3, %g1 + + /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ + ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 + cmp %g2, %g6 + sethi %hi(_PAGE_EXEC), %g7 + bne,a,pn %xcc, tsb_miss_page_table_walk + mov FAULT_CODE_ITLB, %g3 + andcc %g3, %g7, %g0 + be,a,pn %xcc, tsb_do_fault + mov FAULT_CODE_ITLB, %g3 + + /* We have a valid entry, make hypervisor call to load + * I-TLB and return from trap. + * + * %g3: PTE + * %g4: vaddr + * %g6: TAG TARGET (only "CTX << 48" part matters) + */ +sun4v_itlb_load: + mov %o0, %g1 ! save %o0 + mov %o1, %g2 ! save %o1 + mov %o2, %g5 ! save %o2 + mov %o3, %g7 ! save %o3 + mov %g4, %o0 ! vaddr + srlx %g6, 48, %o1 ! ctx + mov %g3, %o2 ! PTE + mov HV_MMU_IMMU, %o3 ! flags + ta HV_MMU_MAP_ADDR_TRAP + mov %g1, %o0 ! restore %o0 + mov %g2, %o1 ! restore %o1 + mov %g5, %o2 ! restore %o2 + mov %g7, %o3 ! restore %o3 + + retry + +sun4v_dtlb_miss: + /* Load CPU ID into %g3. */ + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + + /* Load UTSB reg into %g1. */ + ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 + + /* Load &trap_block[smp_processor_id()] into %g2. */ + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + + /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. + * Branch if kernel TLB miss. The kernel TSB and user TSB miss + * code wants the missing virtual address in %g4, so that value + * cannot be modified through the entirety of this handler. + */ + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + srlx %g4, 22, %g3 + sllx %g5, 48, %g6 + or %g6, %g3, %g6 + brz,pn %g5, kvmap_dtlb_4v + nop + + /* Create TSB pointer. This is something like: + * + * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; + * tsb_base = tsb_reg & ~0x7UL; + */ + and %g1, 0x7, %g3 + andn %g1, 0x7, %g1 + mov 512, %g7 + sllx %g7, %g3, %g7 + sub %g7, 1, %g7 + + /* TSB index mask is in %g7, tsb base is in %g1. 
Compute + * the TSB entry pointer into %g1: + * + * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); + * tsb_ptr = tsb_base + (tsb_index * 16); + */ + srlx %g4, PAGE_SHIFT, %g3 + and %g3, %g7, %g3 + sllx %g3, 4, %g3 + add %g1, %g3, %g1 + + /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ + ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 + cmp %g2, %g6 + bne,a,pn %xcc, tsb_miss_page_table_walk + mov FAULT_CODE_ITLB, %g3 + + /* We have a valid entry, make hypervisor call to load + * D-TLB and return from trap. + * + * %g3: PTE + * %g4: vaddr + * %g6: TAG TARGET (only "CTX << 48" part matters) + */ +sun4v_dtlb_load: + mov %o0, %g1 ! save %o0 + mov %o1, %g2 ! save %o1 + mov %o2, %g5 ! save %o2 + mov %o3, %g7 ! save %o3 + mov %g4, %o0 ! vaddr + srlx %g6, 48, %o1 ! ctx + mov %g3, %o2 ! PTE + mov HV_MMU_DMMU, %o3 ! flags + ta HV_MMU_MAP_ADDR_TRAP + mov %g1, %o0 ! restore %o0 + mov %g2, %o1 ! restore %o1 + mov %g5, %o2 ! restore %o2 + mov %g7, %o3 ! restore %o3 + + retry + +sun4v_dtlb_prot: + /* Load CPU ID into %g3. */ + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + + /* Load &trap_block[smp_processor_id()] into %g2. */ + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5 + rdpr %tl, %g1 + cmp %g1, 1 + bgu,pn %xcc, winfix_trampoline + nop + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define SUN4V_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + srl %g1, 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl sun4v_patch_tlb_handlers + .type sun4v_patch_tlb_handlers,#function +sun4v_patch_tlb_handlers: + SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss) + SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss) + SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss) + SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss) + SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot) + SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot) + retl + nop + .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 96e63168d8b..818bc9e9135 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -18,30 +18,33 @@ * %g4: available temporary * %g5: available temporary * %g6: TAG TARGET - * %g7: physical address base of the linux page + * %g7: available temporary, will be loaded by us with + * the physical address base of the linux page * tables for the current address space */ - .globl tsb_miss_dtlb tsb_miss_dtlb: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g4 ba,pt %xcc, tsb_miss_page_table_walk nop - .globl tsb_miss_itlb tsb_miss_itlb: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_IMMU, %g4 ba,pt %xcc, tsb_miss_page_table_walk nop + /* The sun4v TLB miss handlers jump directly here instead + * of tsb_miss_{d,i}tlb with the missing virtual address + * already loaded into %g4. + */ tsb_miss_page_table_walk: TRAP_LOAD_PGD_PHYS(%g7, %g5) USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) tsb_reload: - TSB_LOCK_TAG(%g1, %g2, %g4) + TSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 @@ -52,9 +55,9 @@ tsb_reload: * bother putting it into the TSB. 
*/ srlx %g5, 32, %g2 - sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g4 + sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g7 + and %g2, %g7, %g2 sethi %hi(_PAGE_SZBITS >> 32), %g7 - and %g2, %g4, %g2 cmp %g2, %g7 bne,a,pn %xcc, tsb_tlb_reload TSB_STORE(%g1, %g0) @@ -68,12 +71,54 @@ tsb_tlb_reload: nop tsb_dtlb_load: - stxa %g5, [%g0] ASI_DTLB_DATA_IN + +661: stxa %g5, [%g0] ASI_DTLB_DATA_IN retry + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_DTLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_dtlb_load + mov %g5, %g3 tsb_itlb_load: - stxa %g5, [%g0] ASI_ITLB_DATA_IN + +661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_ITLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_itlb_load + mov %g5, %g3 /* No valid entry in the page tables, do full fault * processing. @@ -95,10 +140,17 @@ tsb_do_fault: nop tsb_do_dtlb_fault: - rdpr %tl, %g4 - cmp %g4, 1 - mov TLB_TAG_ACCESS, %g4 + rdpr %tl, %g3 + cmp %g3, 1 + +661: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g5 + .section .gl_2insn_patch, "ax" + .word 661b + mov %g4, %g5 + nop + .previous + be,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB, %g4 ba,pt %xcc, winfix_trampoline @@ -196,12 +248,23 @@ __tsb_context_switch: add %g2, %g1, %g2 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] - mov TSB_REG, %g1 +661: mov TSB_REG, %g1 stxa %o1, [%g1] ASI_DMMU + .section .gl_2insn_patch, "ax" + .word 661b + mov SCRATCHPAD_UTSBREG1, %g1 + stxa %o1, [%g1] ASI_SCRATCHPAD + .previous + membar #Sync - stxa %o1, [%g1] ASI_IMMU +661: stxa %o1, [%g1] ASI_IMMU membar #Sync + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous brz %o2, 9f nop diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 686bf6b3b03..a09a8a2383d 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -71,6 +71,9 @@ SECTIONS __con_initcall_end = .; SECURITY_INIT . 
= ALIGN(4); + __tsb_ldquad_phys_patch = .; + .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) } + __tsb_ldquad_phys_patch_end = .; __tsb_phys_patch = .; .tsb_phys_patch : { *(.tsb_phys_patch) } __tsb_phys_patch_end = .; diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ab50cd9618f..e9aac424877 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1050,8 +1050,25 @@ unsigned long __init find_ecache_flush_span(unsigned long size) static void __init tsb_phys_patch(void) { + struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; + pquad = &__tsb_ldquad_phys_patch; + while (pquad < &__tsb_ldquad_phys_patch_end) { + unsigned long addr = pquad->addr; + + if (tlb_type == hypervisor) + *(unsigned int *) addr = pquad->sun4v_insn; + else + *(unsigned int *) addr = pquad->sun4u_insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + pquad++; + } + p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; @@ -1069,6 +1086,7 @@ static void __init tsb_phys_patch(void) /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); +extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; @@ -1078,9 +1096,13 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) tsb_phys_patch(); + if (tlb_type == hypervisor) + sun4v_patch_tlb_handlers(); + /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.2.3 From df7d6aec96ab98cb182dd5138a85bdc363a9bf0d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 00:00:16 -0800 Subject: [SPARC64]: Rename gl_{1,2}insn_patch --> sun4v_{1,2}insn_patch Signed-off-by: David S. Miller --- arch/sparc64/kernel/etrap.S | 4 ++-- arch/sparc64/kernel/ktlb.S | 4 ++-- arch/sparc64/kernel/rtrap.S | 4 ++-- arch/sparc64/kernel/setup.c | 16 ++++++++-------- arch/sparc64/kernel/tsb.S | 12 ++++++------ arch/sparc64/kernel/vmlinux.lds.S | 12 ++++++------ arch/sparc64/mm/ultra.S | 4 ++-- 7 files changed, 28 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 4d644949ad4..d8c062a1700 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -105,7 +105,7 @@ etrap_save: save %g2, -STACK_BIAS, %sp /* Go to trap time globals so we can save them. */ 661: wrpr %g0, ETRAP_PSTATE1, %pstate - .section .gl_1insn_patch, "ax" + .section .sun4v_1insn_patch, "ax" .word 661b SET_GL(0) .previous @@ -206,7 +206,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. 
wrpr %g0, 1, %tl 661: nop - .section .gl_1insn_patch, "ax" + .section .sun4v_1insn_patch, "ax" .word 661b SET_GL(1) .previous diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 2e55084a0c1..f6bb2e08964 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -67,7 +67,7 @@ kvmap_itlb_longpath: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -179,7 +179,7 @@ kvmap_dtlb_longpath: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index a2fa277da62..a55d517e76a 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -234,7 +234,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 /* Normal globals are restored, go to trap globals. */ 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate - .section .gl_1insn_patch, "ax" + .section .sun4v_1insn_patch, "ax" .word 661b SET_GL(1) .previous @@ -316,7 +316,7 @@ user_rtt_fill_fixup: wrpr %g0, RTRAP_PSTATE, %pstate 661: nop - .section .gl_1insn_patch, "ax" + .section .sun4v_1insn_patch, "ax" .word 661b SET_GL(0) .previous diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 40acac5b833..6d6178efd58 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -549,16 +549,16 @@ static void __init per_cpu_patch(void) #endif } -static void __init gl_patch(void) +static void __init sun4v_patch(void) { - struct gl_1insn_patch_entry *p1; - struct gl_2insn_patch_entry *p2; + struct sun4v_1insn_patch_entry *p1; + struct sun4v_2insn_patch_entry *p2; if (tlb_type != hypervisor) return; - p1 = &__gl_1insn_patch; - while (p1 < &__gl_1insn_patch_end) { + p1 = &__sun4v_1insn_patch; + while (p1 < &__sun4v_1insn_patch_end) { unsigned long addr = p1->addr; *(unsigned int *) (addr + 0) = p1->insn; @@ -568,8 +568,8 @@ static void __init gl_patch(void) p1++; } - p2 = &__gl_2insn_patch; - while (p2 < &__gl_2insn_patch_end) { + p2 = &__sun4v_2insn_patch; + while (p2 < &__sun4v_2insn_patch_end) { unsigned long addr = p2->addr; *(unsigned int *) (addr + 0) = p2->insns[0]; @@ -606,7 +606,7 @@ void __init setup_arch(char **cmdline_p) */ per_cpu_patch(); - gl_patch(); + sun4v_patch(); boot_flags_init(*cmdline_p); diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 818bc9e9135..819a6ef9799 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -74,7 +74,7 @@ tsb_dtlb_load: 661: stxa %g5, [%g0] ASI_DTLB_DATA_IN retry - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -99,7 +99,7 @@ tsb_itlb_load: 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -130,7 +130,7 @@ tsb_do_fault: 661: rdpr %pstate, %g5 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -145,7 +145,7 @@ tsb_do_dtlb_fault: 661: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g5 - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b mov %g4, %g5 nop @@ -250,7 +250,7 @@ __tsb_context_switch: 661: mov TSB_REG, %g1 stxa %o1, [%g1] ASI_DMMU - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b mov SCRATCHPAD_UTSBREG1, %g1 stxa %o1, [%g1] ASI_SCRATCHPAD @@ 
-260,7 +260,7 @@ __tsb_context_switch: 661: stxa %o1, [%g1] ASI_IMMU membar #Sync - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index a09a8a2383d..b097379a49a 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -80,12 +80,12 @@ SECTIONS __cpuid_patch = .; .cpuid_patch : { *(.cpuid_patch) } __cpuid_patch_end = .; - __gl_1insn_patch = .; - .gl_1insn_patch : { *(.gl_1insn_patch) } - __gl_1insn_patch_end = .; - __gl_2insn_patch = .; - .gl_2insn_patch : { *(.gl_2insn_patch) } - __gl_2insn_patch_end = .; + __sun4v_1insn_patch = .; + .sun4v_1insn_patch : { *(.sun4v_1insn_patch) } + __sun4v_1insn_patch_end = .; + __sun4v_2insn_patch = .; + .sun4v_2insn_patch : { *(.sun4v_2insn_patch) } + __sun4v_2insn_patch_end = .; . = ALIGN(8192); __initramfs_start = .; .init.ramfs : { *(.init.ramfs) } diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 5dd86ad0d29..8c244932b1c 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -447,7 +447,7 @@ xcall_sync_tick: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -478,7 +478,7 @@ xcall_report_regs: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop -- cgit v1.2.3 From 8591e3027235d6d11b958e43379f2ee7b7114841 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 16:09:12 -0800 Subject: [SPARC64]: Niagara copy/clear page. Happily we have no D-cache aliasing issues on these chips, so the implementation is very straightforward. Add a stub in bootup which will be where the patching calls will be made for niagara/sun4v/hypervisor. Signed-off-by: David S. Miller --- arch/sparc64/kernel/head.S | 18 +++++++++ arch/sparc64/lib/Makefile | 1 + arch/sparc64/lib/NGpage.S | 95 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 114 insertions(+) create mode 100644 arch/sparc64/lib/NGpage.S (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 03fc0b5b1d9..f04f7391f23 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -316,6 +316,24 @@ sun4u_init: ba,pt %xcc, spitfire_tlb_fixup nop + /* XXX Nothing branches to here yet, when %ver register indicates + * XXX Niagara we should do this. + */ +niagara_tlb_fixup: + mov 3, %g2 /* Set TLB type to hypervisor. */ + sethi %hi(tlb_type), %g1 + stw %g2, [%g1 + %lo(tlb_type)] + + /* Patch copy/clear ops. */ + call niagara_patch_copyops + nop + call niagara_patch_pageops + nop + + /* Patch TLB/cache ops. */ + call hypervisor_patch_cachetlbops + nop + cheetah_tlb_fixup: mov 2, %g2 /* Set TLB type to cheetah+. 
*/ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 813f622b5c4..3d0e9a24d7a 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile @@ -12,6 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ + NGpage.o \ copy_in_user.o user_fixup.o memmove.o \ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S new file mode 100644 index 00000000000..0e6152c28b0 --- /dev/null +++ b/arch/sparc64/lib/NGpage.S @@ -0,0 +1,95 @@ +/* NGpage.S: Niagara optimize clear and copy page. + * + * Copyright (C) 2006 (davem@davemloft.net) + */ + +#include +#include + + .text + .align 32 + + /* This is heavily simplified from the sun4u variants + * because Niagara does not have any D-cache aliasing issues + * and also we don't need to use the FPU in order to implement + * an optimal page copy/clear. + */ + +NGcopy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ + prefetch [%o1 + 0x00], #one_read + mov 8, %g1 + mov 16, %g2 + mov 24, %g3 + set PAGE_SIZE, %g7 + +1: ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 + ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 + prefetch [%o1 + 0x40], #one_read + add %o1, 32, %o1 + stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P + ldda [%o1 + %g0] ASI_BLK_INIT_QUAD_LDD_P, %o2 + stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + ldda [%o1 + %g2] ASI_BLK_INIT_QUAD_LDD_P, %o4 + add %o1, 32, %o1 + add %o0, 32, %o0 + stxa %o2, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + stxa %o3, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P + stxa %o4, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %o5, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + subcc %g7, 64, %g7 + bne,pt %xcc, 1b + add %o0, 32, %o0 + retl + nop + +NGclear_page: /* %o0=dest */ +NGclear_user_page: /* %o0=dest, %o1=vaddr */ + mov 8, %g1 + mov 16, %g2 + mov 24, %g3 + set PAGE_SIZE, %g7 + +1: stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + add %o0, 32, %o0 + stxa %g0, [%o0 + %g0] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g1] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g2] ASI_BLK_INIT_QUAD_LDD_P + stxa %g0, [%o0 + %g3] ASI_BLK_INIT_QUAD_LDD_P + subcc %g7, 64, %g7 + bne,pt %xcc, 1b + add %o0, 32, %o0 + retl + nop + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + srl %g1, 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_pageops + .type niagara_patch_pageops,#function +niagara_patch_pageops: + NG_DO_PATCH(copy_user_page, NGcopy_user_page) + NG_DO_PATCH(_clear_page, NGclear_page) + NG_DO_PATCH(clear_user_page, NGclear_user_page) + retl + nop + .size niagara_patch_pageops,.-niagara_patch_pageops -- cgit v1.2.3 From 481295f982b21b1dbe71cbf41d3a93028fee30d1 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Tue, 7 Feb 2006 21:51:08 -0800 Subject: [SPARC64]: Register per-cpu fault status area with sun4v hypervisor. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 3 +++ arch/sparc64/mm/init.c | 29 +++++++++++++++++++++++++---- 2 files changed, 28 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index aba0f886b05..223cc6bd369 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -122,6 +122,9 @@ void __init smp_callin(void) __local_per_cpu_offset = __per_cpu_offset(cpuid); + if (tlb_type == hypervisor) + sun4v_register_fault_status(); + __flush_tlb_all(); smp_setup_percpu_timer(); diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e9aac424877..4c95cf34075 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -40,6 +40,7 @@ #include #include #include +#include extern void device_scan(void); @@ -1083,6 +1084,24 @@ static void __init tsb_phys_patch(void) } } +/* Register this cpu's fault status area with the hypervisor. */ +void __cpuinit sun4v_register_fault_status(void) +{ + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + int cpu = hard_smp_processor_id(); + struct trap_per_cpu *tb = &trap_block[cpu]; + unsigned long pa; + + pa = kern_base + ((unsigned long) tb - KERNBASE); + arg0 = HV_FAST_MMU_FAULT_AREA_CONF; + arg1 = pa; + __asm__ __volatile__("ta %4" + : "=&r" (arg0), "=&r" (arg1) + : "0" (arg0), "1" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1096,12 +1115,17 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_register_fault_status(); + } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1112,9 +1136,6 @@ void __init paging_init(void) pfn_base = phys_base >> PAGE_SHIFT; - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); -- cgit v1.2.3 From 8b11bd12aff76e02cdc2cbc9e439bba88d281223 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:13:05 -0800 Subject: [SPARC64]: Patch up mmu context register writes for sun4v. sun4v uses ASI_MMU instead of ASI_DMMU Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/entry.S | 80 +++++++++++++++++++++++++++++++++++----- arch/sparc64/kernel/etrap.S | 8 +++- arch/sparc64/kernel/head.S | 33 +++++++++++++---- arch/sparc64/kernel/rtrap.S | 24 ++++++++++-- arch/sparc64/kernel/setup.c | 30 ++++++++------- arch/sparc64/kernel/trampoline.S | 32 ++++++++++++---- arch/sparc64/mm/init.c | 9 ----- arch/sparc64/prom/p1275.c | 11 ------ 8 files changed, 166 insertions(+), 61 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 4ca3ea0beaf..f51b66a1687 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -97,10 +97,22 @@ do_fpdis: add %g6, TI_FPREGS + 0x80, %g1 faddd %f0, %f2, %f4 fmuld %f0, %f2, %f6 - ldxa [%g3] ASI_DMMU, %g5 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + sethi %hi(sparc64_kern_sec_context), %g2 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 - stxa %g2, [%g3] ASI_DMMU + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + membar #Sync add %g6, TI_FPREGS + 0xc0, %g2 faddd %f0, %f2, %f8 @@ -126,11 +138,23 @@ do_fpdis: fzero %f32 mov SECONDARY_CONTEXT, %g3 fzero %f34 - ldxa [%g3] ASI_DMMU, %g5 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + add %g6, TI_FPREGS, %g1 sethi %hi(sparc64_kern_sec_context), %g2 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 - stxa %g2, [%g3] ASI_DMMU + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + membar #Sync add %g6, TI_FPREGS + 0x40, %g2 faddd %f32, %f34, %f36 @@ -155,10 +179,22 @@ do_fpdis: nop 3: mov SECONDARY_CONTEXT, %g3 add %g6, TI_FPREGS, %g1 - ldxa [%g3] ASI_DMMU, %g5 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + sethi %hi(sparc64_kern_sec_context), %g2 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 - stxa %g2, [%g3] ASI_DMMU + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + membar #Sync mov 0x40, %g2 membar #Sync @@ -169,7 +205,13 @@ do_fpdis: ldda [%g1 + %g2] ASI_BLK_S, %f48 membar #Sync fpdis_exit: - stxa %g5, [%g3] ASI_DMMU + +661: stxa %g5, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g5, [%g3] ASI_MMU + .previous + membar #Sync fpdis_exit2: wr %g7, 0, %gsr @@ -323,10 +365,22 @@ do_fptrap_after_fsr: rd %gsr, %g3 stx %g3, [%g6 + TI_GSR] mov SECONDARY_CONTEXT, %g3 - ldxa [%g3] ASI_DMMU, %g5 + +661: ldxa [%g3] ASI_DMMU, %g5 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%g3] ASI_MMU, %g5 + .previous + sethi %hi(sparc64_kern_sec_context), %g2 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 - stxa %g2, [%g3] ASI_DMMU + +661: stxa %g2, [%g3] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g3] ASI_MMU + .previous + membar #Sync add %g6, TI_FPREGS, %g2 andcc %g1, FPRS_DL, %g0 @@ -341,7 +395,13 @@ do_fptrap_after_fsr: stda %f48, [%g2 + %g3] ASI_BLK_S 5: mov SECONDARY_CONTEXT, %g1 membar #Sync - stxa %g5, [%g1] ASI_DMMU + +661: stxa %g5, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g5, [%g1] ASI_MMU + .previous + membar #Sync ba,pt %xcc, etrap wr %g0, 0, %fprs diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index d8c062a1700..a0e7d480e5d 100644 --- 
a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -95,7 +95,13 @@ etrap_save: save %g2, -STACK_BIAS, %sp wrpr %g2, 0, %wstate sethi %hi(sparc64_kern_pri_context), %g2 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 - stxa %g3, [%l4] ASI_DMMU + +661: stxa %g3, [%l4] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g3, [%l4] ASI_MMU + .previous + sethi %hi(KERNBASE), %l4 flush %l4 mov ASI_AIUS, %l7 diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index f04f7391f23..a304845f8c5 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -303,12 +303,24 @@ jump_to_sun4u_init: sun4u_init: /* Set ctx 0 */ - mov PRIMARY_CONTEXT, %g7 - stxa %g0, [%g7] ASI_DMMU - membar #Sync + mov PRIMARY_CONTEXT, %g7 + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous + + membar #Sync + + mov SECONDARY_CONTEXT, %g7 + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous - mov SECONDARY_CONTEXT, %g7 - stxa %g0, [%g7] ASI_DMMU membar #Sync BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) @@ -436,8 +448,15 @@ setup_trap_table: /* Start using proper page size encodings in ctx register. */ sethi %hi(sparc64_kern_pri_context), %g3 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 - mov PRIMARY_CONTEXT, %g1 - stxa %g2, [%g1] ASI_DMMU + + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + membar #Sync /* Kill PROM timer */ diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index a55d517e76a..551f7198200 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -264,11 +264,23 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 brnz,pn %l3, kern_rtt mov PRIMARY_CONTEXT, %l7 - ldxa [%l7 + %l7] ASI_DMMU, %l0 + +661: ldxa [%l7 + %l7] ASI_DMMU, %l0 + .section .sun4v_1insn_patch, "ax" + .word 661b + ldxa [%l7 + %l7] ASI_MMU, %l0 + .previous + sethi %hi(sparc64_kern_pri_nuc_bits), %l1 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 or %l0, %l1, %l0 - stxa %l0, [%l7] ASI_DMMU + +661: stxa %l0, [%l7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %l0, [%l7] ASI_MMU + .previous + sethi %hi(KERNBASE), %l7 flush %l7 rdpr %wstate, %l1 @@ -303,7 +315,13 @@ user_rtt_fill_fixup: sethi %hi(sparc64_kern_pri_context), %g2 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 mov PRIMARY_CONTEXT, %g1 - stxa %g2, [%g1] ASI_DMMU + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + sethi %hi(KERNBASE), %g1 flush %g1 diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 6d6178efd58..2d64320d3a4 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -189,26 +189,30 @@ int prom_callback(long *args) } if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { - extern unsigned long sparc64_kern_pri_context; - - /* Spitfire Errata #32 workaround */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (sparc64_kern_pri_context), - "r" (PRIMARY_CONTEXT), - "i" (ASI_DMMU)); + if (tlb_type == spitfire) { + extern unsigned long sparc64_kern_pri_context; + + /* Spitfire Errata #32 workaround */ + __asm__ __volatile__( + "stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (sparc64_kern_pri_context), + "r" (PRIMARY_CONTEXT), + "i" (ASI_DMMU)); + } 
/* * Locked down tlb entry. */ - if (tlb_type == spitfire) + if (tlb_type == spitfire) { tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT); + res = PROM_TRUE; - else if (tlb_type == cheetah || tlb_type == cheetah_plus) + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT); - - res = PROM_TRUE; + res = PROM_TRUE; + } goto done; } diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 18c333f841e..d9e2af35158 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -272,10 +272,22 @@ do_unlock: wr %g0, ASI_P, %asi mov PRIMARY_CONTEXT, %g7 - stxa %g0, [%g7] ASI_DMMU + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous + membar #Sync mov SECONDARY_CONTEXT, %g7 - stxa %g0, [%g7] ASI_DMMU + +661: stxa %g0, [%g7] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g0, [%g7] ASI_MMU + .previous + membar #Sync mov 1, %g5 @@ -301,11 +313,17 @@ do_unlock: nop /* Start using proper page size encodings in ctx register. */ - sethi %hi(sparc64_kern_pri_context), %g3 - ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 - mov PRIMARY_CONTEXT, %g1 - stxa %g2, [%g1] ASI_DMMU - membar #Sync + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + membar #Sync rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4c95cf34075..6504d6eb537 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -792,15 +792,6 @@ void sparc_ultra_dump_dtlb(void) } } -static inline void spitfire_errata32(void) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); -} - extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c index a5a7c571202..2b32c489860 100644 --- a/arch/sparc64/prom/p1275.c +++ b/arch/sparc64/prom/p1275.c @@ -30,16 +30,6 @@ extern void prom_world(int); extern void prom_cif_interface(void); extern void prom_cif_callback(void); -static inline unsigned long spitfire_get_primary_context(void) -{ - unsigned long ctx; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=r" (ctx) - : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - return ctx; -} - /* * This provides SMP safety on the p1275buf. prom_callback() drops this lock * to allow recursive acquisition. @@ -55,7 +45,6 @@ long p1275_cmd(const char *service, long fmt, ...) long attrs, x; p = p1275buf.prom_buffer; - BUG_ON((spitfire_get_primary_context() & CTX_NR_MASK) != 0); spin_lock_irqsave(&prom_entry_lock, flags); -- cgit v1.2.3 From e088ad7ca3d09c96e63f1ce411a2ccba2688bf25 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 23:51:49 -0800 Subject: [SPARC64]: Verify all trap_per_cpu assembler offsets in trap_init() Signed-off-by: David S. Miller
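The check works because both sides of every comparison are compile-time constants: when the offsets agree, gcc folds the whole if away, and the call to trap_per_cpu_offsets_are_bolixed_dave(), which has no definition anywhere, never survives to link time; a mismatch therefore shows up as a link failure. Below is a compile-time sketch of the same invariant, using an invented stand-in struct and C11 _Static_assert rather than the kernel's actual definitions (the offsets assume an LP64 target):

#include <stddef.h>

struct trap_per_cpu_model {		/* stand-in, not the kernel's struct */
	void *thread;			/* offset 0 */
	unsigned long pgd_paddr;	/* offset 8 */
	unsigned long cpu_mondo_pa;	/* offset 16 */
};

#define MODEL_TRAP_PER_CPU_CPU_MONDO_PA	16	/* value the assembler assumes */

_Static_assert(offsetof(struct trap_per_cpu_model, cpu_mondo_pa) ==
	       MODEL_TRAP_PER_CPU_CPU_MONDO_PA,
	       "assembler offset out of sync with struct layout");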
--- arch/sparc64/kernel/traps.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 1c4744c047a..8f3fce24359 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2182,7 +2182,18 @@ void __init trap_init(void) thread_info_offsets_are_bolixed_dave(); if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || - TRAP_PER_CPU_PGD_PADDR != offsetof(struct trap_per_cpu, pgd_paddr)) + (TRAP_PER_CPU_PGD_PADDR != + offsetof(struct trap_per_cpu, pgd_paddr)) || + (TRAP_PER_CPU_CPU_MONDO_PA != + offsetof(struct trap_per_cpu, cpu_mondo_pa)) || + (TRAP_PER_CPU_DEV_MONDO_PA != + offsetof(struct trap_per_cpu, dev_mondo_pa)) || + (TRAP_PER_CPU_RESUM_MONDO_PA != + offsetof(struct trap_per_cpu, resum_mondo_pa)) || + (TRAP_PER_CPU_NONRESUM_MONDO_PA != + offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || + (TRAP_PER_CPU_FAULT_INFO != + offsetof(struct trap_per_cpu, fault_info))) trap_per_cpu_offsets_are_bolixed_dave(); /* Attach to the address space of init_task. On SMP we -- cgit v1.2.3 From ac29c11d4cd4fa1fac968e99998a956405732f2f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Feb 2006 00:08:23 -0800 Subject: [SPARC64]: Allocate and register the 4 sun4v mondo queues at bootup. Needs to occur before we enable PSTATE_IE in %pstate. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 48 ++++++++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/trampoline.S | 12 +++++++++- 2 files changed, 59 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index d069a6feb53..3c1a2139f1b 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -855,6 +855,51 @@ void init_irqwork_curcpu(void) memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct)); } +static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) +{ + register unsigned long func __asm__("%o0"); + register unsigned long arg0 __asm__("%o1"); + register unsigned long arg1 __asm__("%o2"); + register unsigned long arg2 __asm__("%o3"); + unsigned long page = get_zeroed_page(GFP_ATOMIC); + + if (!page) { + prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); + prom_halt(); + } + + *pa_ptr = __pa(page); + + func = HV_FAST_CPU_QCONF; + arg0 = type; + arg1 = *pa_ptr; + arg2 = 128; /* XXX Implied by Niagara queue offsets. XXX */ + __asm__ __volatile__("ta %8" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2) + : "0" (func), "1" (arg0), + "2" (arg1), "3" (arg2), + "i" (HV_FAST_TRAP)); + + if (func != HV_EOK) { + prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n", + type, func); + prom_halt(); + } +} + +/* Allocate and init the mondo queues for this cpu. */ +void __cpuinit sun4v_init_mondo_queues(void) +{ + int cpu = hard_smp_processor_id(); + struct trap_per_cpu *tb = &trap_block[cpu]; + + init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); + init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); + init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); + init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); +} + /* Only invoked on boot processor.
*/ void __init init_IRQ(void) { @@ -862,6 +907,9 @@ void __init init_IRQ(void) kill_prom_timer(); memset(&ivector_table[0], 0, sizeof(ivector_table)); + if (tlb_type == hypervisor) + sun4v_init_mondo_queues(); + /* We need to clear any IRQ's pending in the soft interrupt * registers, a spurious one could be left around from the * PROM timer which we just disabled. diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index d9e2af35158..fbf844f84a4 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -309,7 +309,17 @@ do_unlock: call init_irqwork_curcpu nop - call init_cur_cpu_trap + + sethi %hi(tlb_type), %g3 + lduw [%g3 + %lo(tlb_type)], %g2 + cmp %g2, 3 + bne,pt %icc, 1f + nop + + call sun4v_init_mondo_queues + nop + +1: call init_cur_cpu_trap nop /* Start using proper page size encodings in ctx register. */ -- cgit v1.2.3 From 5b0c0572fcd6204675c5f7ddfa572b5017f817dd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Feb 2006 02:53:50 -0800 Subject: [SPARC64]: Sun4v interrupt handling. Sun4v has 4 interrupt queues: cpu, device, resumable errors, and non-resumable errors. A set of head/tail offset pointers help maintain a work queue in physical memory. The entries are 64-bytes in size. Each queue is allocated then registered with the hypervisor as we bring cpus up. The two error queues each get a kernel side buffer that we use to quickly empty the main interrupt queue before we call up to C code to log the event and possibly take evasive action. Signed-off-by: David S. Miller --- arch/sparc64/kernel/head.S | 3 +- arch/sparc64/kernel/irq.c | 16 +- arch/sparc64/kernel/sun4v_ivec.S | 349 +++++++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/traps.c | 184 +++++++++++++++++++++ arch/sparc64/kernel/ttable.S | 5 +- 5 files changed, 554 insertions(+), 3 deletions(-) create mode 100644 arch/sparc64/kernel/sun4v_ivec.S (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index a304845f8c5..01980014aea 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -511,13 +511,14 @@ setup_tba: sparc64_boot_end: #include "systbls.S" -#include "sun4v_tlb_miss.S" #include "ktlb.S" #include "tsb.S" #include "etrap.S" #include "rtrap.S" #include "winfixup.S" #include "entry.S" +#include "sun4v_tlb_miss.S" +#include "sun4v_ivec.S" /* * The following skip makes sure the trap table in ttable.S is aligned diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 3c1a2139f1b..ff201c007e0 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -888,7 +888,19 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) } } -/* Allocate and init the mondo queues for this cpu. */ +static void __cpuinit init_one_kbuf(unsigned long *pa_ptr) +{ + unsigned long page = get_zeroed_page(GFP_ATOMIC); + + if (!page) { + prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); + prom_halt(); + } + + *pa_ptr = __pa(page); +} + +/* Allocate and init the mondo and error queues for this cpu. 
*/ void __cpuinit sun4v_init_mondo_queues(void) { int cpu = hard_smp_processor_id(); @@ -897,7 +909,9 @@ void __cpuinit sun4v_init_mondo_queues(void) init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); + init_one_kbuf(&tb->resum_kernel_buf_pa); init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); + init_one_kbuf(&tb->nonresum_kernel_buf_pa); } /* Only invoked on boot processor. */ diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S new file mode 100644 index 00000000000..d9d442017d3 --- /dev/null +++ b/arch/sparc64/kernel/sun4v_ivec.S @@ -0,0 +1,349 @@ +/* sun4v_ivec.S: Sun4v interrupt vector handling. + * + * Copyright (C) 2006 + */ + +#include +#include + + .text + .align 32 + +sun4v_cpu_mondo: + /* Head offset in %g2, tail offset in %g4. + * If they are the same, no work. + */ + mov INTRQ_CPU_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_CPU_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_cpu_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + __GET_CPUID(%g1) + sethi %hi(trap_block), %g3 + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 + or %g3, %lo(trap_block), %g3 + add %g3, %g7, %g3 + + /* Get CPU mondo queue base phys address into %g7. */ + ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 + + /* Now get the cross-call arguments and handler PC, same + * layout as sun4u: + * + * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it + * high half is context arg to MMU flushes, into %g5 + * 2nd 64-bit word: 64-bit arg, load into %g1 + * 3rd 64-bit word: 64-bit arg, load into %g7 + */ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3 + add %g2, 0x8, %g2 + srlx %g3, 32, %g5 + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + add %g2, 0x8, %g2 + srl %g3, 0, %g3 + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7 + add %g2, 0x40 - 0x8 - 0x8, %g2 + + /* Update queue head pointer. */ + sethi %hi(8192 - 1), %g4 + or %g4, %lo(8192 - 1), %g4 + and %g2, %g4, %g2 + + mov INTRQ_CPU_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + jmpl %g3, %g0 + nop + +sun4v_cpu_mondo_queue_empty: + retry + +sun4v_dev_mondo: + /* Head offset in %g2, tail offset in %g4. */ + mov INTRQ_DEVICE_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_DEVICE_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_dev_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + __GET_CPUID(%g1) + sethi %hi(trap_block), %g3 + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 + or %g3, %lo(trap_block), %g3 + add %g3, %g7, %g3 + + /* Get DEV mondo queue base phys address into %g5. */ + ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 + + /* Load IVEC into %g3. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + add %g2, 0x40, %g2 + + /* XXX There can be a full 64-byte block of data here. + * XXX This is how we can get at MSI vector data. + * XXX Current we do not capture this, but when we do we'll + * XXX need to add a 64-byte storage area in the struct ino_bucket + * XXX or the struct irq_desc. + */ + + /* Update queue head pointer, this frees up some registers. */ + sethi %hi(8192 - 1), %g4 + or %g4, %lo(8192 - 1), %g4 + and %g2, %g4, %g2 + + mov INTRQ_DEVICE_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + /* Get &__irq_work[smp_processor_id()] into %g1. 
*/ + sethi %hi(__irq_work), %g4 + sllx %g1, 6, %g1 + or %g4, %lo(__irq_work), %g4 + add %g4, %g1, %g1 + + /* Get &ivector_table[IVEC] into %g4. */ + sethi %hi(ivector_table), %g4 + sllx %g3, 5, %g3 + or %g4, %lo(ivector_table), %g4 + add %g4, %g3, %g4 + + /* Load IRQ %pil into %g5. */ + ldub [%g4 + 0x04], %g5 + + /* Insert ivector_table[] entry into __irq_work[] queue. */ + sllx %g5, 2, %g3 + lduw [%g1 + %g3], %g2 /* g2 = irq_work(cpu, pil) */ + stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */ + stw %g4, [%g1 + %g3] /* irq_work(cpu, pil) = bucket */ + + /* Signal the interrupt by setting (1 << pil) in %softint. */ + mov 1, %g2 + sllx %g2, %g5, %g2 + wr %g2, 0x0, %set_softint + +sun4v_dev_mondo_queue_empty: + retry + +sun4v_res_mondo: + /* Head offset in %g2, tail offset in %g4. */ + mov INTRQ_RESUM_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_RESUM_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_res_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + __GET_CPUID(%g1) + sethi %hi(trap_block), %g3 + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 + or %g3, %lo(trap_block), %g3 + add %g3, %g7, %g3 + + /* Get RES mondo queue base phys address into %g5. */ + ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5 + + /* Get RES kernel buffer base phys address into %g7. */ + ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7 + + /* If the first word is non-zero, queue is full. */ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + brnz,pn %g1, sun4v_res_mondo_queue_full + nop + + /* Remember this entry's offset in %g1. */ + mov %g2, %g1 + + /* Copy 64-byte queue entry into kernel buffer. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + + /* Update queue head pointer. */ + sethi %hi(8192 - 1), %g4 + or %g4, %lo(8192 - 1), %g4 + and %g2, %g4, %g2 + + mov INTRQ_RESUM_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + /* Disable interrupts and save register state so we can call + * C code. The etrap handling will leave %g4 in %l4 for us + * when it's done. + */ + rdpr %pil, %g2 + wrpr %g0, 15, %pil + mov %g1, %g4 + ba,pt %xcc, etrap_irq + rd %pc, %g7 + + /* Log the event. */ + add %sp, PTREGS_OFF, %o0 + call sun4v_resum_error + mov %l4, %o1 + + /* Return from trap. */ + ba,pt %xcc, rtrap_irq + nop + +sun4v_res_mondo_queue_empty: + retry + +sun4v_res_mondo_queue_full: + /* The queue is full, consolidate our damage by setting + * the head equal to the tail. We'll just trap again otherwise. + * Call C code to log the event. 
+ */ + mov INTRQ_RESUM_MONDO_HEAD, %g2 + stxa %g4, [%g2] ASI_QUEUE + membar #Sync + + rdpr %pil, %g2 + wrpr %g0, 15, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 + + call sun4v_resum_overflow + add %sp, PTREGS_OFF, %o0 + + ba,pt %xcc, rtrap_irq + nop + +sun4v_nonres_mondo: + /* Head offset in %g2, tail offset in %g4. */ + mov INTRQ_NONRESUM_MONDO_HEAD, %g2 + ldxa [%g2] ASI_QUEUE, %g2 + mov INTRQ_NONRESUM_MONDO_TAIL, %g4 + ldxa [%g4] ASI_QUEUE, %g4 + cmp %g2, %g4 + be,pn %xcc, sun4v_nonres_mondo_queue_empty + nop + + /* Get &trap_block[smp_processor_id()] into %g3. */ + __GET_CPUID(%g1) + sethi %hi(trap_block), %g3 + sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 + or %g3, %lo(trap_block), %g3 + add %g3, %g7, %g3 + + /* Get RES mondo queue base phys address into %g5. */ + ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5 + + /* Get RES kernel buffer base phys address into %g7. */ + ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7 + + /* If the first word is non-zero, queue is full. */ + ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1 + brnz,pn %g1, sun4v_nonres_mondo_queue_full + nop + + /* Remember this entry's offset in %g1. */ + mov %g2, %g1 + + /* Copy 64-byte queue entry into kernel buffer. */ + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 + stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC + add %g2, 0x08, %g2 + + /* Update queue head pointer. */ + sethi %hi(8192 - 1), %g4 + or %g4, %lo(8192 - 1), %g4 + and %g2, %g4, %g2 + + mov INTRQ_NONRESUM_MONDO_HEAD, %g4 + stxa %g2, [%g4] ASI_QUEUE + membar #Sync + + /* Disable interrupts and save register state so we can call + * C code. The etrap handling will leave %g4 in %l4 for us + * when it's done. + */ + rdpr %pil, %g2 + wrpr %g0, 15, %pil + mov %g1, %g4 + ba,pt %xcc, etrap_irq + rd %pc, %g7 + + /* Log the event. */ + add %sp, PTREGS_OFF, %o0 + call sun4v_nonresum_error + mov %l4, %o1 + + /* Return from trap. */ + ba,pt %xcc, rtrap_irq + nop + +sun4v_nonres_mondo_queue_empty: + retry + +sun4v_nonres_mondo_queue_full: + /* The queue is full, consolidate our damage by setting + * the head equal to the tail. We'll just trap again otherwise. + * Call C code to log the event. 
+ */ + mov INTRQ_NONRESUM_MONDO_HEAD, %g2 + stxa %g4, [%g2] ASI_QUEUE + membar #Sync + + rdpr %pil, %g2 + wrpr %g0, 15, %pil + ba,pt %xcc, etrap_irq + rd %pc, %g7 + + call sun4v_nonresum_overflow + add %sp, PTREGS_OFF, %o0 + + ba,pt %xcc, rtrap_irq + nop diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 8f3fce24359..5417ff1b934 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -1668,6 +1668,186 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) regs->tpc); } +struct sun4v_error_entry { + u64 err_handle; + u64 err_stick; + + u32 err_type; +#define SUN4V_ERR_TYPE_UNDEFINED 0 +#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 +#define SUN4V_ERR_TYPE_PRECISE_NONRES 2 +#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 +#define SUN4V_ERR_TYPE_WARNING_RES 4 + + u32 err_attrs; +#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 +#define SUN4V_ERR_ATTRS_MEMORY 0x00000002 +#define SUN4V_ERR_ATTRS_PIO 0x00000004 +#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 +#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 +#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000 +#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000 +#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 + + u64 err_raddr; + u32 err_size; + u16 err_cpu; + u16 err_pad; +}; + +static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); +static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); + +static const char *sun4v_err_type_to_str(u32 type) +{ + switch (type) { + case SUN4V_ERR_TYPE_UNDEFINED: + return "undefined"; + case SUN4V_ERR_TYPE_UNCORRECTED_RES: + return "uncorrected resumable"; + case SUN4V_ERR_TYPE_PRECISE_NONRES: + return "precise nonresumable"; + case SUN4V_ERR_TYPE_DEFERRED_NONRES: + return "deferred nonresumable"; + case SUN4V_ERR_TYPE_WARNING_RES: + return "warning resumable"; + default: + return "unknown"; + }; +} + +static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) +{ + int cnt; + + printk("%s: Reporting on cpu %d\n", pfx, cpu); + printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n", + pfx, + ent->err_handle, ent->err_stick, + ent->err_type, + sun4v_err_type_to_str(ent->err_type)); + printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n", + pfx, + ent->err_attrs, + ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ? + "processor" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ? + "memory" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ? + "pio" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ? + "integer-regs" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ? + "fpu-regs" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ? + "user" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ? + "privileged" : ""), + ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ? + "queue-full" : "")); + printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n", + pfx, + ent->err_raddr, ent->err_size, ent->err_cpu); + + if ((cnt = atomic_read(ocnt)) != 0) { + atomic_set(ocnt, 0); + wmb(); + printk("%s: Queue overflowed %d times.\n", + pfx, cnt); + } +} + +/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. + * Log the event and clear the first word of the entry. 
+ */ +void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) +{ + struct sun4v_error_entry *ent, local_copy; + struct trap_per_cpu *tb; + unsigned long paddr; + int cpu; + + cpu = get_cpu(); + + tb = &trap_block[cpu]; + paddr = tb->resum_kernel_buf_pa + offset; + ent = __va(paddr); + + memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); + + /* We have a local copy now, so release the entry. */ + ent->err_handle = 0; + wmb(); + + put_cpu(); + + sun4v_log_error(&local_copy, cpu, + KERN_ERR "RESUMABLE ERROR", + &sun4v_resum_oflow_cnt); +} + +/* If we try to printk() we'll probably make matters worse, by trying + * to retake locks this cpu already holds or causing more errors. So + * just bump a counter, and we'll report these counter bumps above. + */ +void sun4v_resum_overflow(struct pt_regs *regs) +{ + atomic_inc(&sun4v_resum_oflow_cnt); +} + +/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate. + * Log the event, clear the first word of the entry, and die. + */ +void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) +{ + struct sun4v_error_entry *ent, local_copy; + struct trap_per_cpu *tb; + unsigned long paddr; + int cpu; + + cpu = get_cpu(); + + tb = &trap_block[cpu]; + paddr = tb->nonresum_kernel_buf_pa + offset; + ent = __va(paddr); + + memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry)); + + /* We have a local copy now, so release the entry. */ + ent->err_handle = 0; + wmb(); + + put_cpu(); + +#ifdef CONFIG_PCI + /* Check for the special PCI poke sequence. */ + if (pci_poke_in_progress && pci_poke_cpu == cpu) { + pci_poke_faulted = 1; + regs->tpc += 4; + regs->tnpc = regs->tpc + 4; + return; + } +#endif + + sun4v_log_error(&local_copy, cpu, + KERN_EMERG "NON-RESUMABLE ERROR", + &sun4v_nonresum_oflow_cnt); + + panic("Non-resumable error."); +} + +/* If we try to printk() we'll probably make matters worse, by trying + * to retake locks this cpu already holds or causing more errors. So + * just bump a counter, and we'll report these counter bumps above. + */ +void sun4v_nonresum_overflow(struct pt_regs *regs) +{ + /* XXX Actually even this can make not that much sense. Perhaps + * XXX we should just pull the plug and panic directly from here? 
+ */ + atomic_inc(&sun4v_nonresum_oflow_cnt); +} + void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { @@ -2190,8 +2370,12 @@ void __init trap_init(void) offsetof(struct trap_per_cpu, dev_mondo_pa)) || (TRAP_PER_CPU_RESUM_MONDO_PA != offsetof(struct trap_per_cpu, resum_mondo_pa)) || + (TRAP_PER_CPU_RESUM_KBUF_PA != + offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || (TRAP_PER_CPU_NONRESUM_MONDO_PA != offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || + (TRAP_PER_CPU_NONRESUM_KBUF_PA != + offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || (TRAP_PER_CPU_FAULT_INFO != offsetof(struct trap_per_cpu, fault_info))) trap_per_cpu_offsets_are_bolixed_dave(); diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 2679b6e253a..1608ba4bf1c 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -88,7 +88,10 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */ tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */ tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75) tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b) -tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f) +tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo) +tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo) +tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo) +tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo) tl0_s0n: SPILL_0_NORMAL tl0_s1n: SPILL_1_NORMAL tl0_s2n: SPILL_2_NORMAL -- cgit v1.2.3 From 1d2f1f90a1e004b0c1b8a73ed4394a93f09104b3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Feb 2006 16:41:20 -0800 Subject: [SPARC64]: Sun4v cross-call sending support. Technically the hypervisor call supports sending in a list of all cpus to get the cross-call, but I only pass in one cpu at a time for now. The multi-cpu support is there, just ifdef'd out so it's easy to enable or delete it later. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 22 ++++++++ arch/sparc64/kernel/smp.c | 125 +++++++++++++++++++++++++++++++++++++++++++- arch/sparc64/kernel/traps.c | 6 ++- 3 files changed, 151 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index ff201c007e0..c80d2531ec4 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -900,6 +900,24 @@ static void __cpuinit init_one_kbuf(unsigned long *pa_ptr) *pa_ptr = __pa(page); } +static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb) +{ +#ifdef CONFIG_SMP + unsigned long page; + + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); + + page = get_zeroed_page(GFP_ATOMIC); + if (!page) { + prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); + prom_halt(); + } + + tb->cpu_mondo_block_pa = __pa(page); + tb->cpu_list_pa = __pa(page + 64); +#endif +} + /* Allocate and init the mondo and error queues for this cpu. */ void __cpuinit sun4v_init_mondo_queues(void) { @@ -908,10 +926,14 @@ void __cpuinit sun4v_init_mondo_queues(void) init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); + init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); init_one_kbuf(&tb->resum_kernel_buf_pa); + init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); init_one_kbuf(&tb->nonresum_kernel_buf_pa); + + init_cpu_send_mondo_info(tb); } /* Only invoked on boot processor. 
*/ diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 223cc6bd369..c10a3a8639e 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -531,10 +531,133 @@ retry: } } +#if 0 +/* Multi-cpu list version. */ +static int init_cpu_list(u16 *list, cpumask_t mask) +{ + int i, cnt; + + cnt = 0; + for_each_cpu_mask(i, mask) + list[cnt++] = i; + + return cnt; +} + +static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask) +{ + int i; + + for (i = 0; i < orig_cnt; i++) { + if (list[i] == 0xffff) + cpu_clear(i, mask); + } + + return init_cpu_list(list, mask); +} + +static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) +{ + int this_cpu = get_cpu(); + struct trap_per_cpu *tb = &trap_block[this_cpu]; + u64 *mondo = __va(tb->cpu_mondo_block_pa); + u16 *cpu_list = __va(tb->cpu_list_pa); + int cnt, retries; + + mondo[0] = data0; + mondo[1] = data1; + mondo[2] = data2; + wmb(); + + retries = 0; + cnt = init_cpu_list(cpu_list, mask); + do { + register unsigned long func __asm__("%o0"); + register unsigned long arg0 __asm__("%o1"); + register unsigned long arg1 __asm__("%o2"); + register unsigned long arg2 __asm__("%o3"); + + func = HV_FAST_CPU_MONDO_SEND; + arg0 = cnt; + arg1 = tb->cpu_list_pa; + arg2 = tb->cpu_mondo_block_pa; + + __asm__ __volatile__("ta %8" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2) + : "0" (func), "1" (arg0), + "2" (arg1), "3" (arg2), + "i" (HV_FAST_TRAP) + : "memory"); + if (likely(func == HV_EOK)) + break; + + if (unlikely(++retries > 100)) { + printk("CPU[%d]: sun4v mondo error %lu\n", + this_cpu, func); + break; + } + + cnt = update_cpu_list(cpu_list, cnt, mask); + + udelay(2 * cnt); + } while (1); + + put_cpu(); +} +#else +/* Single-cpu list version. */ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { - /* XXX implement me */ + int this_cpu = get_cpu(); + struct trap_per_cpu *tb = &trap_block[this_cpu]; + u64 *mondo = __va(tb->cpu_mondo_block_pa); + u16 *cpu_list = __va(tb->cpu_list_pa); + int i; + + mondo[0] = data0; + mondo[1] = data1; + mondo[2] = data2; + wmb(); + + for_each_cpu_mask(i, mask) { + int retries = 0; + + do { + register unsigned long func __asm__("%o0"); + register unsigned long arg0 __asm__("%o1"); + register unsigned long arg1 __asm__("%o2"); + register unsigned long arg2 __asm__("%o3"); + + cpu_list[0] = i; + func = HV_FAST_CPU_MONDO_SEND; + arg0 = 1; + arg1 = tb->cpu_list_pa; + arg2 = tb->cpu_mondo_block_pa; + + __asm__ __volatile__("ta %8" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2) + : "0" (func), "1" (arg0), + "2" (arg1), "3" (arg2), + "i" (HV_FAST_TRAP) + : "memory"); + if (likely(func == HV_EOK)) + break; + + if (unlikely(++retries > 100)) { + printk("CPU[%d]: sun4v mondo error %lu\n", + this_cpu, func); + break; + } + + udelay(2 * i); + } while (1); + } + + put_cpu(); } +#endif /* Send cross call to all processors mentioned in MASK * except self. 
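The single-cpu sender above funnels everything through one hypervisor fast trap. A standalone wrapper for that trap might look like the sketch below, reusing the exact inline-asm sequence from hypervisor_xcall_deliver(); the kernel open-codes it instead, presumably to keep tight control of register usage inside the retry loop. The wrapper name is an assumption.

    /* The page set up by init_cpu_send_mondo_info() holds the 64-byte
     * mondo data block at offset 0 (three u64 words are filled in) and
     * the u16 cpu list at offset 64, which is what the BUILD_BUG_ON
     * above guarantees will fit in one page.
     *
     * Illustrative wrapper for the cpu_mondo_send fast trap; returns
     * HV_EOK on success, a non-zero error status otherwise.
     */
    static unsigned long hv_cpu_mondo_send(unsigned long cnt,
                                           unsigned long cpu_list_pa,
                                           unsigned long mondo_block_pa)
    {
        register unsigned long func __asm__("%o0") = HV_FAST_CPU_MONDO_SEND;
        register unsigned long arg0 __asm__("%o1") = cnt;
        register unsigned long arg1 __asm__("%o2") = cpu_list_pa;
        register unsigned long arg2 __asm__("%o3") = mondo_block_pa;

        __asm__ __volatile__("ta %8"
                             : "=&r" (func), "=&r" (arg0),
                               "=&r" (arg1), "=&r" (arg2)
                             : "0" (func), "1" (arg0),
                               "2" (arg1), "3" (arg2),
                               "i" (HV_FAST_TRAP)
                             : "memory");
        return func;
    }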
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 5417ff1b934..ac171161e79 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2377,7 +2377,11 @@ void __init trap_init(void) (TRAP_PER_CPU_NONRESUM_KBUF_PA != offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || (TRAP_PER_CPU_FAULT_INFO != - offsetof(struct trap_per_cpu, fault_info))) + offsetof(struct trap_per_cpu, fault_info)) || + (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != + offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || + (TRAP_PER_CPU_CPU_LIST_PA != + offsetof(struct trap_per_cpu, cpu_list_pa))) trap_per_cpu_offsets_are_bolixed_dave(); /* Attach to the address space of init_task. On SMP we -- cgit v1.2.3 From d82ace7dc4073b090a55b9740700e32b9a9ae302 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 02:52:44 -0800 Subject: [SPARC64]: Detect sun4v early in boot process. We look for "SUNW,sun4v" in the 'compatible' property of the root OBP device tree node. Protect every %ver register access, to make sure it is not touched on sun4v, as %ver is hyperprivileged there. Lock kernel TLB entries using hypervisor calls instead of calls into OBP. Signed-off-by: David S. Miller --- arch/sparc64/kernel/cpu.c | 6 +++ arch/sparc64/kernel/head.S | 96 ++++++++++++++++++++++++++++++++++---- arch/sparc64/kernel/irq.c | 78 +++++++++++++++++-------------- arch/sparc64/kernel/setup.c | 9 ++-- arch/sparc64/kernel/trampoline.S | 62 +++++++++++++++++++++--- arch/sparc64/kernel/us2e_cpufreq.c | 3 ++ arch/sparc64/kernel/us3_cpufreq.c | 3 ++ arch/sparc64/mm/init.c | 58 ++++++++++++++++++----- arch/sparc64/prom/init.c | 20 ++++---- arch/sparc64/prom/tree.c | 9 ++-- 10 files changed, 264 insertions(+), 80 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index 00eed88ef2e..c7a4fb39702 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c @@ -71,6 +71,12 @@ void __init cpu_probe(void) unsigned long ver, fpu_vers, manuf, impl, fprs; int i; + if (tlb_type == hypervisor) { + sparc_cpu_type = "UltraSparc T1 (Niagara)"; + sparc_fpu_type = "UltraSparc T1 integrated FPU"; + return; + } + fprs = fprs_read(); fprs_write(FPRS_FEF); __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 01980014aea..d048f0dfd42 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -95,12 +95,17 @@ sparc64_boot: wrpr %g1, 0x0, %pstate ba,a,pt %xcc, 1f - .globl prom_finddev_name, prom_chosen_path - .globl prom_getprop_name, prom_mmu_name - .globl prom_callmethod_name, prom_translate_name + .globl prom_finddev_name, prom_chosen_path, prom_root_node + .globl prom_getprop_name, prom_mmu_name, prom_peer_name + .globl prom_callmethod_name, prom_translate_name, prom_root_compatible .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache .globl prom_boot_mapped_pc, prom_boot_mapping_mode .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low + .globl is_sun4v +prom_peer_name: + .asciz "peer" +prom_compatible_name: + .asciz "compatible" prom_finddev_name: .asciz "finddevice" prom_chosen_path: @@ -117,7 +122,13 @@ prom_map_name: .asciz "map" prom_unmap_name: .asciz "unmap" +prom_sun4v_name: + .asciz "SUNW,sun4v" .align 4 +prom_root_compatible: + .skip 64 +prom_root_node: + .word 0 prom_mmu_ihandle_cache: .word 0 prom_boot_mapped_pc: @@ -129,8 +140,54 @@ prom_boot_mapping_phys_high: .xword 0 prom_boot_mapping_phys_low: .xword 0 +is_sun4v: + .word 0 1: 
rd %pc, %l0 + + mov (1b - prom_peer_name), %l1 + sub %l0, %l1, %l1 + mov 0, %l2 + + /* prom_root_node = prom_peer(0) */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 + stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node + mov (1b - prom_root_node), %l1 + sub %l0, %l1, %l1 + stw %l4, [%l1] + + mov (1b - prom_getprop_name), %l1 + mov (1b - prom_compatible_name), %l2 + mov (1b - prom_root_compatible), %l5 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l5, %l5 + + /* prom_getproperty(prom_root_node, "compatible", + * &prom_root_compatible, 64) + */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node + stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" + stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible + mov 64, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size + stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + mov (1b - prom_finddev_name), %l1 mov (1b - prom_chosen_path), %l2 mov (1b - prom_boot_mapped_pc), %l3 @@ -239,6 +296,27 @@ prom_boot_mapping_phys_low: add %sp, (192 + 128), %sp sparc64_boot_after_remap: + sethi %hi(prom_root_compatible), %g1 + or %g1, %lo(prom_root_compatible), %g1 + sethi %hi(prom_sun4v_name), %g7 + or %g7, %lo(prom_sun4v_name), %g7 + mov 10, %g3 +1: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 2f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 1b + add %g1, 1, %g1 + + sethi %hi(is_sun4v), %g1 + or %g1, %lo(is_sun4v), %g1 + mov 1, %g7 + stw %g7, [%g1] + +2: + BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) ba,pt %xcc, spitfire_boot @@ -323,14 +401,12 @@ sun4u_init: membar #Sync - BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) + BRANCH_IF_SUN4V(g1, niagara_tlb_fixup) + BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) ba,pt %xcc, spitfire_tlb_fixup nop - /* XXX Nothing branches to here yet, when %ver register indicates - * XXX Niagara we should do this. - */ niagara_tlb_fixup: mov 3, %g2 /* Set TLB type to hypervisor. */ sethi %hi(tlb_type), %g1 @@ -346,6 +422,9 @@ niagara_tlb_fixup: call hypervisor_patch_cachetlbops nop + ba,pt %xcc, tlb_fixup_done + nop + cheetah_tlb_fixup: mov 2, %g2 /* Set TLB type to cheetah+. */ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) @@ -464,7 +543,8 @@ setup_trap_table: sllx %o2, 32, %o2 wr %o2, 0, %tick_cmpr - BRANCH_IF_ANY_CHEETAH(o2,o3,1f) + BRANCH_IF_SUN4V(o2, 1f) + BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) ba,pt %xcc, 2f nop diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c80d2531ec4..1f6455503f2 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -150,47 +150,53 @@ void enable_irq(unsigned int irq) preempt_disable(); - if (tlb_type == cheetah || tlb_type == cheetah_plus) { - unsigned long ver; - - __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - if ((ver >> 32) == __JALAPENO_ID || - (ver >> 32) == __SERRANO_ID) { - /* We set it to our JBUS ID. */ + if (tlb_type == hypervisor) { + /* XXX SUN4V: implement me... 
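The ten-byte compare loop in sparc64_boot_after_remap is the assembly equivalent of the C sketch below; head.S has to do it by hand because this runs before the C environment is set up. prom_root_compatible is the 64-byte buffer filled by the getprop call earlier in head.S, and the function name is illustrative.

    /* C rendering of the sun4v detection: on sun4v machines the root
     * node's "compatible" property begins with "SUNW,sun4v". */
    extern char prom_root_compatible[64];
    extern unsigned int is_sun4v;

    static void __init detect_sun4v(void)
    {
        /* 10 == strlen("SUNW,sun4v"), the byte count the asm compares */
        if (strncmp(prom_root_compatible, "SUNW,sun4v", 10) == 0)
            is_sun4v = 1;
    }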
XXX */ + } else { + if (tlb_type == cheetah || tlb_type == cheetah_plus) { + unsigned long ver; + + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID) { + /* We set it to our JBUS ID. */ + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=r" (tid) + : "i" (ASI_JBUS_CONFIG)); + tid = ((tid & (0x1fUL<<17)) << 9); + tid &= IMAP_TID_JBUS; + } else { + /* We set it to our Safari AID. */ + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=r" (tid) + : "i"(ASI_SAFARI_CONFIG)); + tid = ((tid & (0x3ffUL<<17)) << 9); + tid &= IMAP_AID_SAFARI; + } + } else if (this_is_starfire == 0) { + /* We set it to our UPA MID. */ __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (tid) - : "i" (ASI_JBUS_CONFIG)); - tid = ((tid & (0x1fUL<<17)) << 9); - tid &= IMAP_TID_JBUS; + : "i" (ASI_UPA_CONFIG)); + tid = ((tid & UPA_CONFIG_MID) << 9); + tid &= IMAP_TID_UPA; } else { - /* We set it to our Safari AID. */ - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (tid) - : "i" (ASI_SAFARI_CONFIG)); - tid = ((tid & (0x3ffUL<<17)) << 9); - tid &= IMAP_AID_SAFARI; + tid = (starfire_translate(imap, + smp_processor_id()) << 26); + tid &= IMAP_TID_UPA; } - } else if (this_is_starfire == 0) { - /* We set it to our UPA MID. */ - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (tid) - : "i" (ASI_UPA_CONFIG)); - tid = ((tid & UPA_CONFIG_MID) << 9); - tid &= IMAP_TID_UPA; - } else { - tid = (starfire_translate(imap, smp_processor_id()) << 26); - tid &= IMAP_TID_UPA; - } - /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product - * of this SYSIO's preconfigured IGN in the SYSIO Control - * Register, the hardware just mirrors that value here. - * However for Graphics and UPA Slave devices the full - * IMAP_INR field can be set by the programmer here. - * - * Things like FFB can now be handled via the new IRQ mechanism. - */ - upa_writel(tid | IMAP_VALID, imap); + /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product + * of this SYSIO's preconfigured IGN in the SYSIO Control + * Register, the hardware just mirrors that value here. + * However for Graphics and UPA Slave devices the full + * IMAP_INR field can be set by the programmer here. + * + * Things like FFB can now be handled via the new IRQ + * mechanism. 
+ */ + upa_writel(tid | IMAP_VALID, imap); + } preempt_enable(); } diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 2d64320d3a4..7f02c8f71df 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -504,9 +504,12 @@ static void __init per_cpu_patch(void) if (tlb_type == spitfire && !this_is_starfire) return; - __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - is_jbus = ((ver >> 32) == __JALAPENO_ID || - (ver >> 32) == __SERRANO_ID); + is_jbus = 0; + if (tlb_type != hypervisor) { + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + is_jbus = ((ver >> 32) == __JALAPENO_ID || + (ver >> 32) == __SERRANO_ID); + } p = &__cpuid_patch; while (p < &__cpuid_patch_end) { diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index fbf844f84a4..ffa8b79632c 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -16,6 +16,7 @@ #include #include #include +#include .data .align 8 @@ -34,8 +35,9 @@ dtlb_load: sparc64_cpu_startup: flushw - BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup) - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup) + BRANCH_IF_SUN4V(g1, niagara_startup) + BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup) + BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup) ba,pt %xcc, spitfire_startup nop @@ -70,7 +72,9 @@ cheetah_generic_startup: stxa %g0, [%g3] ASI_DMMU stxa %g0, [%g3] ASI_IMMU membar #Sync + /* fallthru */ +niagara_startup: /* Disable STICK_INT interrupts. */ sethi %hi(0x80000000), %g5 sllx %g5, 32, %g5 @@ -91,6 +95,8 @@ startup_continue: sllx %g2, 32, %g2 wr %g2, 0, %tick_cmpr + BRANCH_IF_SUN4V(g1, niagara_lock_tlb) + /* Call OBP by hand to lock KERNBASE into i/d tlbs. * We lock 2 consequetive entries if we are 'bigkernel'. */ @@ -142,8 +148,7 @@ startup_continue: sethi %hi(bigkernel), %g2 lduw [%g2 + %lo(bigkernel)], %g2 - cmp %g2, 0 - be,pt %icc, do_dtlb + brz,pt %g2, do_dtlb nop sethi %hi(call_method), %g2 @@ -214,8 +219,7 @@ do_dtlb: sethi %hi(bigkernel), %g2 lduw [%g2 + %lo(bigkernel)], %g2 - cmp %g2, 0 - be,pt %icc, do_unlock + brz,pt %g2, do_unlock nop sethi %hi(call_method), %g2 @@ -257,6 +261,52 @@ do_unlock: stb %g0, [%g2 + %lo(prom_entry_lock)] membar #StoreStore | #StoreLoad + ba,pt %xcc, after_lock_tlb + nop + +niagara_lock_tlb: + mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 + sethi %hi(KERNBASE), %o1 + clr %o2 + sethi %hi(kern_locked_tte_data), %o3 + ldx [%o3 + %lo(kern_locked_tte_data)], %o3 + mov HV_MMU_IMMU, %o4 + ta HV_FAST_TRAP + + mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 + sethi %hi(KERNBASE), %o1 + clr %o2 + sethi %hi(kern_locked_tte_data), %o3 + ldx [%o3 + %lo(kern_locked_tte_data)], %o3 + mov HV_MMU_DMMU, %o4 + ta HV_FAST_TRAP + + sethi %hi(bigkernel), %g2 + lduw [%g2 + %lo(bigkernel)], %g2 + brz,pt %g2, after_lock_tlb + nop + + mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 + sethi %hi(KERNBASE + 0x400000), %o1 + clr %o2 + sethi %hi(kern_locked_tte_data), %o3 + ldx [%o3 + %lo(kern_locked_tte_data)], %o3 + sethi %hi(0x400000), %o4 + add %o3, %o4, %o3 + mov HV_MMU_IMMU, %o4 + ta HV_FAST_TRAP + + mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 + sethi %hi(KERNBASE + 0x400000), %o1 + clr %o2 + sethi %hi(kern_locked_tte_data), %o3 + ldx [%o3 + %lo(kern_locked_tte_data)], %o3 + sethi %hi(0x400000), %o4 + add %o3, %o4, %o3 + mov HV_MMU_DMMU, %o4 + ta HV_FAST_TRAP + +after_lock_tlb: mov %l1, %sp flushw diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index b35dc8dc995..669fb83dd4f 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ 
b/arch/sparc64/kernel/us2e_cpufreq.c @@ -346,6 +346,9 @@ static int __init us2e_freq_init(void) unsigned long manuf, impl, ver; int ret; + if (tlb_type != spitfire) + return -ENODEV; + __asm__("rdpr %%ver, %0" : "=r" (ver)); manuf = ((ver >> 48) & 0xffff); impl = ((ver >> 32) & 0xffff); diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 6d1f9a3c464..a912c45bdc0 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c @@ -203,6 +203,9 @@ static int __init us3_freq_init(void) unsigned long manuf, impl, ver; int ret; + if (tlb_type != cheetah && tlb_type != cheetah_plus) + return -ENODEV; + __asm__("rdpr %%ver, %0" : "=r" (ver)); manuf = ((ver >> 48) & 0xffff); impl = ((ver >> 32) & 0xffff); diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6504d6eb537..e602b857071 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -514,6 +514,29 @@ static void __init read_obp_translations(void) } } +static void __init hypervisor_tlb_lock(unsigned long vaddr, + unsigned long pte, + unsigned long mmu) +{ + register unsigned long func asm("%o0"); + register unsigned long arg0 asm("%o1"); + register unsigned long arg1 asm("%o2"); + register unsigned long arg2 asm("%o3"); + register unsigned long arg3 asm("%o4"); + + func = HV_FAST_MMU_MAP_PERM_ADDR; + arg0 = vaddr; + arg1 = 0; + arg2 = pte; + arg3 = mmu; + __asm__ __volatile__("ta 0x80" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2), + "=&r" (arg3) + : "0" (func), "1" (arg0), "2" (arg1), + "3" (arg2), "4" (arg3)); +} + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -527,19 +550,30 @@ static void __init remap_kernel(void) kern_locked_tte_data = tte_data; - /* Now lock us into the TLBs via OBP. */ - prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); - prom_itlb_load(tlb_ent, tte_data, tte_vaddr); - if (bigkernel) { - tlb_ent -= 1; - prom_dtlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); - prom_itlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); + /* Now lock us into the TLBs via Hypervisor or OBP. */ + if (tlb_type == hypervisor) { + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + if (bigkernel) { + tte_vaddr += 0x400000; + tte_data += 0x400000; + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + } + } else { + prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); + prom_itlb_load(tlb_ent, tte_data, tte_vaddr); + if (bigkernel) { + tlb_ent -= 1; + prom_dtlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + prom_itlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + } + sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; } - sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index f3cc2d8578b..095755e428a 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c @@ -18,7 +18,6 @@ enum prom_major_version prom_vers; unsigned int prom_rev, prom_prev; /* The root node of the prom device tree. 
*/ -int prom_root_node; int prom_stdin, prom_stdout; int prom_chosen_node; @@ -41,26 +40,22 @@ void __init prom_init(void *cif_handler, void *cif_stack) prom_cif_init(cif_handler, cif_stack); - prom_root_node = prom_getsibling(0); - if((prom_root_node == 0) || (prom_root_node == -1)) - prom_halt(); - prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || prom_chosen_node == -1) prom_halt(); - prom_stdin = prom_getint (prom_chosen_node, "stdin"); - prom_stdout = prom_getint (prom_chosen_node, "stdout"); + prom_stdin = prom_getint(prom_chosen_node, "stdin"); + prom_stdout = prom_getint(prom_chosen_node, "stdout"); node = prom_finddevice("/openprom"); if (!node || node == -1) prom_halt(); - prom_getstring (node, "version", buffer, sizeof (buffer)); + prom_getstring(node, "version", buffer, sizeof (buffer)); - prom_printf ("\n"); + prom_printf("\n"); - if (strncmp (buffer, "OBP ", 4)) + if (strncmp(buffer, "OBP ", 4)) goto strange_version; /* @@ -70,7 +65,7 @@ void __init prom_init(void *cif_handler, void *cif_stack) * accordingly. -spot */ - if (strncmp (buffer, "OBP ", 5)) + if (strncmp(buffer, "OBP ", 5)) bufadjust = 4; else bufadjust = 5; @@ -87,7 +82,8 @@ void __init prom_init(void *cif_handler, void *cif_stack) prom_rev = ints[1]; prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2]; - printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); + printk("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); + printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); /* Initialization successful. */ return; diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index b1ff9e87dcc..49075abd7cb 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c @@ -51,7 +51,7 @@ prom_getparent(int node) __inline__ int __prom_getsibling(int node) { - return p1275_cmd ("peer", P1275_INOUT(1, 1), node); + return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); } __inline__ int @@ -59,9 +59,12 @@ prom_getsibling(int node) { int sibnode; - if(node == -1) return 0; + if (node == -1) + return 0; sibnode = __prom_getsibling(node); - if(sibnode == -1) return 0; + if (sibnode == -1) + return 0; + return sibnode; } -- cgit v1.2.3 From 3a8c069d0eea936c2fe3637ab62967af31555ee9 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 02:54:54 -0800 Subject: [SPARC64]: Print ARCH as SUN4V when tlb_type is hypervisor. Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 7f02c8f71df..cec921f6cdb 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -597,7 +597,10 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = prom_getbootargs(); strcpy(saved_command_line, *cmdline_p); - printk("ARCH: SUN4U\n"); + if (tlb_type == hypervisor) + printk("ARCH: SUN4V\n"); + else + printk("ARCH: SUN4U\n"); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; -- cgit v1.2.3 From 12816ab38adddc9d7e9b3315d1739655dedc7c9f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 03:00:00 -0800 Subject: [SPARC64]: kernel/cpu.c needs asm/spitfire.h For 'tlb_type'. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/cpu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index c7a4fb39702..11cc0caef59 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c @@ -13,6 +13,7 @@ #include #include #include +#include DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; -- cgit v1.2.3 From aa9143b9719c07fb6f1f6207790c9c5086ae07e7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 16:12:22 -0800 Subject: [SPARC64]: Implement sun4v TSB miss handlers. When we register a TSB with the hypervisor, so that it or hardware can handle TLB misses and do the TSB walk for us, the hypervisor traps down to these traps when it incurs a TSB miss. Processing is simple: we load the missing virtual address and context, and do a full page table walk. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 51 ++++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/tsb.S | 7 +++-- arch/sparc64/kernel/ttable.S | 15 ++++++----- 3 files changed, 64 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 58ea5dd8573..b8678b5557a 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -187,6 +187,57 @@ sun4v_dtlb_prot: ba,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 + /* Called from trap table with &trap_block[smp_processor_id()] in + * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1. + */ +sun4v_itsb_miss: + ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + + srlx %g4, 22, %g7 + sllx %g5, 48, %g6 + or %g6, %g7, %g6 + brz,pn %g5, kvmap_itlb_4v + nop + + ba,pt %xcc, sun4v_tsb_miss_common + mov FAULT_CODE_ITLB, %g3 + + /* Called from trap table with &trap_block[smp_processor_id()] in + * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1. + */ +sun4v_dtsb_miss: + ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + + srlx %g4, 22, %g7 + sllx %g5, 48, %g6 + or %g6, %g7, %g6 + brz,pn %g5, kvmap_dtlb_4v + nop + + mov FAULT_CODE_DTLB, %g3 + + /* Create TSB pointer into %g1. This is something like: + * + * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; + * tsb_base = tsb_reg & ~0x7UL; + * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); + * tsb_ptr = tsb_base + (tsb_index * 16); + */ +sun4v_tsb_miss_common: + and %g1, 0x7, %g2 + andn %g1, 0x7, %g1 + mov 512, %g7 + sllx %g7, %g2, %g7 + sub %g7, 1, %g7 + srlx %g4, PAGE_SHIFT, %g2 + and %g2, %g7, %g2 + sllx %g2, 4, %g2 + ba,pt %xcc, tsb_miss_page_table_walk + add %g1, %g2, %g1 + + #define BRANCH_ALWAYS 0x10680000 #define NOP 0x01000000 #define SUN4V_DO_PATCH(OLD, NEW) \ diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 819a6ef9799..c848c8847cd 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -35,8 +35,11 @@ tsb_miss_itlb: nop /* The sun4v TLB miss handlers jump directly here instead - * of tsb_miss_{d,i}tlb with the missing virtual address - * already loaded into %g4.
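Spelled out in C with the names the comment in sun4v_tsb_miss_common uses (note the comment calls the same mask both index_mask and tsb_mask; there is only one mask), the pointer computation is the sketch below. tsb_reg is the value kept in SCRATCHPAD_UTSBREG1: its low 3 bits are the TSB size field and the remaining bits are the TSB base.

    /* C rendering of the sun4v_tsb_miss_common pointer math; each TSB
     * entry is 16 bytes and a size field of S means 512 << S entries. */
    static unsigned long sun4v_tsb_ptr(unsigned long tsb_reg,
                                       unsigned long vaddr)
    {
        unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
        unsigned long tsb_base = tsb_reg & ~0x7UL;
        unsigned long tsb_index = (vaddr >> PAGE_SHIFT) & index_mask;

        return tsb_base + (tsb_index * 16);
    }

With a size field of 0 this indexes 512 entries, an 8KB TSB, which matches hp->num_ttes = tsb_bytes / 16 in the descriptor setup of the TSB context-switch patch that follows. The tag target formed on entry to the handler is ((ctx << 48) | (vaddr >> 22)), the sllx/srlx/or triple in sun4v_itsb_miss and sun4v_dtsb_miss.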
+ * of tsb_miss_{d,i}tlb with registers setup as follows: + * + * %g4: missing virtual address + * %g1: TSB entry address loaded + * %g6: TAG TARGET ((vaddr >> 22) | (ctx << 48)) */ tsb_miss_page_table_walk: TRAP_LOAD_PGD_PHYS(%g7, %g5) diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index 1608ba4bf1c..a9d210e11eb 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -1,7 +1,6 @@ -/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $ - * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions. +/* ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah/SUN4V extensions. * - * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996, 2001, 2006 David S. Miller (davem@davemloft.net) */ #include @@ -22,7 +21,8 @@ tl0_iax: membar #Sync tl0_resv009: BTRAP(0x9) tl0_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) -tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) +tl0_itsb_4v: SUN4V_ITSB_MISS +tl0_resv00c: BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) tl0_ill: membar #Sync TRAP_7INSNS(do_illegal_instruction) tl0_privop: TRAP(do_privop) @@ -38,7 +38,7 @@ tl0_div0: TRAP(do_div0) tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) tl0_resv02f: BTRAP(0x2f) tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) -tl0_resv031: BTRAP(0x31) +tl0_dtsb_4v: SUN4V_DTSB_MISS tl0_dae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) tl0_resv033: BTRAP(0x33) @@ -185,7 +185,8 @@ tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) tl1_resv009: BTRAPTL1(0x9) tl1_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) -tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) +tl1_itsb_4v: SUN4V_ITSB_MISS +tl1_resv00c: BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) tl1_ill: TRAPTL1(do_ill_tl1) tl1_privop: BTRAPTL1(0x11) tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15) @@ -201,7 +202,7 @@ tl1_div0: TRAPTL1(do_div0_tl1) tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) -tl1_resv031: BTRAPTL1(0x31) +tl1_dtsb_4v: SUN4V_DTSB_MISS tl1_dae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) tl1_resv033: BTRAPTL1(0x33) -- cgit v1.2.3 From 618e9ed98aed924a1fc664eb6522db4a5e927043 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 17:21:53 -0800 Subject: [SPARC64]: Hypervisor TSB context switching. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/tsb.S | 42 ++++++++++++++++++++++++++--------------- arch/sparc64/mm/tsb.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index c848c8847cd..a53ec6fb769 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -4,6 +4,7 @@ */ #include +#include .text .align 32 @@ -233,6 +234,7 @@ tsb_flush: * %o1: TSB register value * %o2: TSB virtual address * %o3: TSB mapping locked PTE + * %o4: Hypervisor TSB descriptor physical address * * We have to run this whole thing with interrupts * disabled so that the current cpu doesn't change @@ -251,30 +253,40 @@ __tsb_context_switch: add %g2, %g1, %g2 stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] -661: mov TSB_REG, %g1 - stxa %o1, [%g1] ASI_DMMU - .section .sun4v_2insn_patch, "ax" - .word 661b + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + nop + + /* Hypervisor TSB switch. */ mov SCRATCHPAD_UTSBREG1, %g1 stxa %o1, [%g1] ASI_SCRATCHPAD - .previous + mov -1, %g2 + mov SCRATCHPAD_UTSBREG2, %g1 + stxa %g2, [%g1] ASI_SCRATCHPAD - membar #Sync + mov HV_FAST_MMU_TSB_CTXNON0, %o0 + mov 1, %o1 + mov %o4, %o2 + ta HV_FAST_TRAP + + ba,pt %xcc, 9f + nop -661: stxa %o1, [%g1] ASI_IMMU + /* SUN4U TSB switch. */ +1: mov TSB_REG, %g1 + stxa %o1, [%g1] ASI_DMMU + membar #Sync + stxa %o1, [%g1] ASI_IMMU membar #Sync - .section .sun4v_2insn_patch, "ax" - .word 661b - nop - nop - .previous - brz %o2, 9f +2: brz %o2, 9f nop - sethi %hi(sparc64_highest_unlocked_tlb_ent), %o4 + sethi %hi(sparc64_highest_unlocked_tlb_ent), %g2 mov TLB_TAG_ACCESS, %g1 - lduw [%o4 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 + lduw [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2 stxa %o2, [%g1] ASI_DMMU membar #Sync sllx %g2, 3, %g2 diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 2cc8e6528c6..6ae2a5a702c 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -149,7 +149,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) BUG(); }; - if (tlb_type == cheetah_plus) { + if (tlb_type == cheetah_plus || tlb_type == hypervisor) { /* Physical mapping, no locked TLB entry for TSB. */ tsb_reg |= tsb_paddr; @@ -166,6 +166,52 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) mm->context.tsb_map_pte = tte; } + /* Setup the Hypervisor TSB descriptor. */ + if (tlb_type == hypervisor) { + struct hv_tsb_descr *hp = &mm->context.tsb_descr; + + switch (PAGE_SIZE) { + case 8192: + default: + hp->pgsz_idx = HV_PGSZ_IDX_8K; + break; + + case 64 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_64K; + break; + + case 512 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_512K; + break; + + case 4 * 1024 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_4MB; + break; + }; + hp->assoc = 1; + hp->num_ttes = tsb_bytes / 16; + hp->ctx_idx = 0; + switch (PAGE_SIZE) { + case 8192: + default: + hp->pgsz_mask = HV_PGSZ_MASK_8K; + break; + + case 64 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_64K; + break; + + case 512 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_512K; + break; + + case 4 * 1024 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_4MB; + break; + }; + hp->tsb_base = tsb_paddr; + hp->resv = 0; + } } /* The page tables are locked against modifications while this -- cgit v1.2.3 From ed6b0b45437dcf7ef1c48b3be413bebcc84771d8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 20:20:34 -0800 Subject: [SPARC64]: SUN4V memory exception trap handlers. 
Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 170 +++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/traps.c | 108 ++++++++++++++++++++-- arch/sparc64/kernel/unaligned.c | 45 +++++++--- arch/sparc64/kernel/winfixup.S | 26 ++++-- 4 files changed, 325 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index b8678b5557a..c408b05a5f0 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -237,6 +237,167 @@ sun4v_tsb_miss_common: ba,pt %xcc, tsb_miss_page_table_walk add %g1, %g2, %g1 + /* Instruction Access Exception, tl0. */ +sun4v_iacc: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_insn_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Instruction Access Exception, tl1. */ +sun4v_iacc_tl1: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etraptl1 + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_insn_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Data Access Exception, tl0. */ +sun4v_dacc: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_data_access_exception + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Data Access Exception, tl1. */ +sun4v_dacc_tl1: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etraptl1 + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_data_access_exception_tl1 + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Memory Address Unaligned. 
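Each of the exception stubs above opens with the same per-cpu lookup before diverging. As a C sketch, assuming the fault_info field is the hypervisor fault-status block and with scratchpad_read() as a hypothetical stand-in for the ldxa from ASI_SCRATCHPAD:

    /* Shared prologue of sun4v_iacc, sun4v_dacc, and friends: locate
     * this cpu's trap_block[] slot and read the fault state the
     * hypervisor deposited there. scratchpad_read() is illustrative. */
    static struct hv_fault_status *this_cpu_fault_info(void)
    {
        unsigned long cpu = scratchpad_read(SCRATCHPAD_CPUID);

        return &trap_block[cpu].fault_info;
    }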
*/ +sun4v_mna: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + mov HV_FAULT_TYPE_UNALIGNED, %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + + /* Window fixup? */ + rdpr %tl, %g2 + cmp %g2, 1 + bgu,pn %icc, winfix_mna + rdpr %tpc, %g3 + + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call sun4v_mna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Privileged Action. */ +sun4v_privact: + ba,pt %xcc, etrap + rd %pc, %g7 + call do_privact + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Unaligned ldd float, tl0. */ +sun4v_lddfmna: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_lddfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 + + /* Unaligned std float, tl0. */ +sun4v_stdfmna: + mov SCRATCHPAD_CPUID, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g3 + sethi %hi(trap_block), %g2 + or %g2, %lo(trap_block), %g2 + sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + add %g2, %g3, %g2 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + sllx %g3, 16, %g3 + or %g5, %g3, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o1 + mov %l5, %o2 + call handle_stdfmna + add %sp, PTREGS_OFF, %o0 + ba,a,pt %xcc, rtrap_clr_l6 #define BRANCH_ALWAYS 0x10680000 #define NOP 0x01000000 @@ -265,6 +426,15 @@ sun4v_patch_tlb_handlers: SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss) SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot) SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot) + SUN4V_DO_PATCH(tl0_iax, sun4v_iacc) + SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1) + SUN4V_DO_PATCH(tl0_dax, sun4v_dacc) + SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1) + SUN4V_DO_PATCH(tl0_mna, sun4v_mna) + SUN4V_DO_PATCH(tl1_mna, sun4v_mna) + SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna) + SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna) + SUN4V_DO_PATCH(tl0_privact, sun4v_privact) retl nop .size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index ac171161e79..1e9a4b6b1fe 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -180,6 +180,45 @@ void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr spitfire_insn_access_exception(regs, sfsr, sfar); } +void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + unsigned short type = (type_ctx >> 16); + unsigned short ctx = (type_ctx & 0xffff); + siginfo_t info; + + if (notify_die(DIE_TRAP, "instruction access exception", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + printk("sun4v_insn_access_exception: ADDR[%016lx] " + "CTX[%04x] TYPE[%04x], going.\n", + addr, ctx, type); + die_if_kernel("Iax", regs); + } + + if 
(test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_code = SEGV_MAPERR; + info.si_addr = (void __user *) addr; + info.si_trapno = 0; + force_sig_info(SIGSEGV, &info, current); +} + +void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + sun4v_insn_access_exception(regs, addr, type_ctx); +} + void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { siginfo_t info; @@ -228,6 +267,45 @@ void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr spitfire_data_access_exception(regs, sfsr, sfar); } +void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + unsigned short type = (type_ctx >> 16); + unsigned short ctx = (type_ctx & 0xffff); + siginfo_t info; + + if (notify_die(DIE_TRAP, "data access exception", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + printk("sun4v_data_access_exception: ADDR[%016lx] " + "CTX[%04x] TYPE[%04x], going.\n", + addr, ctx, type); + die_if_kernel("Iax", regs); + } + + if (test_thread_flag(TIF_32BIT)) { + regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; + } + info.si_signo = SIGSEGV; + info.si_errno = 0; + info.si_code = SEGV_MAPERR; + info.si_addr = (void __user *) addr; + info.si_trapno = 0; + force_sig_info(SIGSEGV, &info, current); +} + +void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, + 0, 0x8, SIGTRAP) == NOTIFY_STOP) + return; + + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + sun4v_data_access_exception(regs, addr, type_ctx); +} + #ifdef CONFIG_PCI /* This is really pathetic... 
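The type_ctx value these handlers unpack is packed by the trap stubs with a shift-and-or (sllx %g3, 16; or %g5, %g3, %g5). A minimal sketch of both directions, with the 0xffff masking the C side applies made explicit:

    /* 16-bit fault type in the high half, 16-bit MMU context in the
     * low half; this is the single argument the sun4v stubs pass to
     * sun4v_insn_access_exception() and friends. */
    static inline unsigned long make_type_ctx(unsigned long type,
                                              unsigned long ctx)
    {
        return (type << 16) | (ctx & 0xffff);
    }

    static inline void split_type_ctx(unsigned long type_ctx,
                                      unsigned short *type,
                                      unsigned short *ctx)
    {
        *type = type_ctx >> 16;
        *ctx = type_ctx & 0xffff;
    }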
*/ extern volatile int pci_poke_in_progress; @@ -2150,6 +2228,8 @@ void do_illegal_instruction(struct pt_regs *regs) force_sig_info(SIGILL, &info, current); } +extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); + void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { siginfo_t info; @@ -2159,13 +2239,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo return; if (regs->tstate & TSTATE_PRIV) { - extern void kernel_unaligned_trap(struct pt_regs *regs, - unsigned int insn, - unsigned long sfar, - unsigned long sfsr); - - kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), - sfar, sfsr); + kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } info.si_signo = SIGBUS; @@ -2176,6 +2250,26 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo force_sig_info(SIGBUS, &info, current); } +void sun4v_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +{ + siginfo_t info; + + if (notify_die(DIE_TRAP, "memory address unaligned", regs, + 0, 0x34, SIGSEGV) == NOTIFY_STOP) + return; + + if (regs->tstate & TSTATE_PRIV) { + kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); + return; + } + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_ADRALN; + info.si_addr = (void __user *) addr; + info.si_trapno = 0; + force_sig_info(SIGBUS, &info, current); +} + void do_privop(struct pt_regs *regs) { siginfo_t info; diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 70faf630603..001e8518331 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c @@ -277,7 +277,7 @@ static void kernel_mna_trap_fault(void) regs->tstate |= (ASI_AIUS << 24UL); } -asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr) +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(insn); @@ -405,6 +405,9 @@ extern void do_privact(struct pt_regs *regs); extern void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar); +extern void sun4v_data_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); int handle_ldf_stq(u32 insn, struct pt_regs *regs) { @@ -447,14 +450,20 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) break; } default: - spitfire_data_access_exception(regs, 0, addr); + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); return 1; } if (put_user (first >> 32, (u32 __user *)addr) || __put_user ((u32)first, (u32 __user *)(addr + 4)) || __put_user (second >> 32, (u32 __user *)(addr + 8)) || __put_user ((u32)second, (u32 __user *)(addr + 12))) { - spitfire_data_access_exception(regs, 0, addr); + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); return 1; } } else { @@ -467,7 +476,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) do_privact(regs); return 1; } else if (asi > ASI_SNFL) { - spitfire_data_access_exception(regs, 0, addr); + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); return 1; } switch (insn & 0x180000) { @@ -484,7 +496,10 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) err |= __get_user 
(data[i], (u32 __user *)(addr + 4*i)); } if (err && !(asi & 0x2 /* NF */)) { - spitfire_data_access_exception(regs, 0, addr); + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, addr, 0); + else + spitfire_data_access_exception(regs, 0, addr); return 1; } if (asi & 0x8) /* Little */ { @@ -548,7 +563,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr u32 insn; u32 first, second; u64 value; - u8 asi, freg; + u8 freg; int flag; struct fpustate *f = FPUSTATE; @@ -557,7 +572,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { - asi = sfsr >> 16; + int asi = decode_asi(insn, regs); if ((asi > ASI_SNFL) || (asi < ASI_P)) goto daex; @@ -587,7 +602,11 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr *(u64 *)(f->regs + freg) = value; current_thread_info()->fpsaved[0] |= flag; } else { -daex: spitfire_data_access_exception(regs, sfsr, sfar); +daex: + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, sfar, sfsr); + else + spitfire_data_access_exception(regs, sfsr, sfar); return; } advance(regs); @@ -600,7 +619,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr unsigned long tstate = regs->tstate; u32 insn; u64 value; - u8 asi, freg; + u8 freg; int flag; struct fpustate *f = FPUSTATE; @@ -609,8 +628,8 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { + int asi = decode_asi(insn, regs); freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20); - asi = sfsr >> 16; value = 0; flag = (freg < 32) ? FPRS_DL : FPRS_DU; if ((asi > ASI_SNFL) || @@ -631,7 +650,11 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr __put_user ((u32)value, (u32 __user *)(sfar + 4))) goto daex; } else { -daex: spitfire_data_access_exception(regs, sfsr, sfar); +daex: + if (tlb_type == hypervisor) + sun4v_data_access_exception(regs, sfar, sfsr); + else + spitfire_data_access_exception(regs, sfsr, sfar); return; } advance(regs); diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index efe2770e8f5..aca2a98b930 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -109,16 +109,23 @@ winfix_mna: done fill_fixup_mna: - TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 and %g1, TSTATE_CWP, %g1 wrpr %g1, %cwp ba,pt %xcc, etrap rd %pc, %g7 - mov %l4, %o2 - mov %l5, %o1 - call mem_address_unaligned + sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f add %sp, PTREGS_OFF, %o0 + call sun4v_mna + nop + ba,a,pt %xcc, rtrap_clr_l6 +1: call mem_address_unaligned + nop ba,a,pt %xcc, rtrap_clr_l6 winfix_dax: @@ -128,14 +135,21 @@ winfix_dax: done fill_fixup_dax: - TRAP_LOAD_THREAD_REG(%g6, %g1) rdpr %tstate, %g1 and %g1, TSTATE_CWP, %g1 wrpr %g1, %cwp ba,pt %xcc, etrap rd %pc, %g7 + sethi %hi(tlb_type), %g1 mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 mov %l5, %o2 - call spitfire_data_access_exception + cmp %g1, 3 + bne,pt %icc, 1f add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap_clr_l6 +1: call spitfire_data_access_exception + nop ba,a,pt %xcc, rtrap_clr_l6 -- cgit v1.2.3 From 4cce4b7cc56abc3d7b269d09224b8297aad15138 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Thu, 9 Feb 2006 20:46:22 -0800 Subject: [SPARC64]: Fetch cpu mid properly on sun4v. If there is a "cpuid" property, use that. Else suck it out of the top bits of the "reg" property. Signed-off-by: David S. Miller --- arch/sparc64/kernel/devices.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index df9a1ca8fd7..ac11d872ef7 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -29,13 +29,30 @@ DEFINE_SPINLOCK(ns87303_lock); extern void cpu_probe(void); extern void central_probe(void); -static char *cpu_mid_prop(void) +static const char *cpu_mid_prop(void) { if (tlb_type == spitfire) return "upa-portid"; return "portid"; } +static int get_cpu_mid(int prom_node) +{ + if (tlb_type == hypervisor) { + struct linux_prom64_registers reg; + + if (prom_getproplen(prom_node, "cpuid") == 4) + return prom_getintdefault(prom_node, "cpuid", 0); + + prom_getproperty(prom_node, "reg", (char *) ®, sizeof(reg)); + return (reg.phys_addr >> 32) & 0x0fffffffUL; + } else { + const char *prop_name = cpu_mid_prop(); + + return prom_getintdefault(prom_node, prop_name, 0); + } +} + static int check_cpu_node(int nd, int *cur_inst, int (*compare)(int, int, void *), void *compare_arg, int *prom_node, int *mid) @@ -50,7 +67,7 @@ static int check_cpu_node(int nd, int *cur_inst, if (prom_node) *prom_node = nd; if (mid) - *mid = prom_getintdefault(nd, cpu_mid_prop(), 0); + *mid = get_cpu_mid(nd); return 0; } @@ -105,7 +122,7 @@ static int cpu_mid_compare(int nd, int instance, void *_arg) int desired_mid = (int) (long) _arg; int this_mid; - this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0); + this_mid = get_cpu_mid(nd); if (this_mid == desired_mid) return 0; return -ENODEV; -- cgit v1.2.3 From 8f6a93a196ba6c569c3e8daa6e81cca7e3ba81b1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 21:32:07 -0800 Subject: [SPARC64]: Beginnings of SUN4V PCI controller support. Abstract out IOMMU operations so that we can have a different set of calls on sun4v, which needs to do things through hypervisor calls. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/Makefile | 2 +- arch/sparc64/kernel/pci.c | 13 ++++++++ arch/sparc64/kernel/pci_iommu.c | 27 ++++++++++----- arch/sparc64/kernel/pci_sun4v.c | 74 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 107 insertions(+), 9 deletions(-) create mode 100644 arch/sparc64/kernel/pci_sun4v.c (limited to 'arch') diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index a482a9ffe5b..44043390e34 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -14,7 +14,7 @@ obj-y := process.o setup.o cpu.o idprom.o \ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ - pci_psycho.o pci_sabre.o pci_schizo.o + pci_psycho.o pci_sabre.o pci_schizo.o pci_sun4v.o obj-$(CONFIG_SMP) += smp.o trampoline.o obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 2ff7c32ab0c..95ffa941862 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c @@ -188,6 +188,7 @@ extern void psycho_init(int, char *); extern void schizo_init(int, char *); extern void schizo_plus_init(int, char *); extern void tomatillo_init(int, char *); +extern void sun4v_pci_init(int, char *); static struct { char *model_name; @@ -204,6 +205,7 @@ static struct { { "pci108e,8002", schizo_plus_init }, { "SUNW,tomatillo", tomatillo_init }, { "pci108e,a801", tomatillo_init }, + { "SUNW,sun4v-pci", sun4v_pci_init }, }; #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \ sizeof(pci_controller_table[0])) @@ -283,6 +285,12 @@ int __init pcic_present(void) return pci_controller_scan(pci_is_controller); } +struct pci_iommu_ops *pci_iommu_ops; +EXPORT_SYMBOL(pci_iommu_ops); + +extern struct pci_iommu_ops pci_sun4u_iommu_ops, + pci_sun4v_iommu_ops; + /* Find each controller in the system, attach and initialize * software state structure for each and link into the * pci_controller_root. Setup the controller enough such @@ -290,6 +298,11 @@ int __init pcic_present(void) */ static void __init pci_controller_probe(void) { + if (tlb_type == hypervisor) + pci_iommu_ops = &pci_sun4v_iommu_ops; + else + pci_iommu_ops = &pci_sun4u_iommu_ops; + printk("PCI: Probing for controllers.\n"); pci_controller_scan(pci_controller_init); diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index a11910be101..8e52232f6f3 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -219,7 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx) * DMA for PCI device PDEV. Return non-NULL cpu-side address if * successful and set *DMA_ADDRP to the PCI side dma address. */ -void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) +static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -267,7 +267,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad } /* Free and unmap a consistent DMA translation. 
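 * (sun4u: the IOPTEs are invalidated and the cpu-side pages returned to the page allocator).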
*/ -void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) +static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -294,7 +294,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_ /* Map a single buffer at PTR of SZ bytes for PCI DMA * in streaming mode. */ -dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) +static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -415,7 +415,7 @@ do_flush_sync: } /* Unmap a single streaming mode DMA translation. */ -void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) +static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -548,7 +548,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, * When making changes here, inspect the assembly output. I was having * hard time to kepp this routine out of using stack slots for holding variables. */ -int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -635,7 +635,7 @@ bad_no_ctx: } /* Unmap a set of streaming mode DMA translations. */ -void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -695,7 +695,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, /* Make physical memory consistent for a single * streaming mode DMA translation after a transfer. */ -void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) +static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -735,7 +735,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size /* Make physical memory consistent for a set of streaming * mode DMA translations after a transfer. 
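 * On sun4u this amounts to flushing the streaming buffer for the affected range.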
*/ -void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { struct pcidev_cookie *pcp; struct pci_iommu *iommu; @@ -776,6 +776,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i spin_unlock_irqrestore(&iommu->lock, flags); } +struct pci_iommu_ops pci_sun4u_iommu_ops = { + .alloc_consistent = pci_4u_alloc_consistent, + .free_consistent = pci_4u_free_consistent, + .map_single = pci_4u_map_single, + .unmap_single = pci_4u_unmap_single, + .map_sg = pci_4u_map_sg, + .unmap_sg = pci_4u_unmap_sg, + .dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu, + .dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu, +}; + static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c new file mode 100644 index 00000000000..c1a077196c5 --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -0,0 +1,74 @@ +/* pci_sun4v.c: SUN4V specific PCI controller support. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "pci_impl.h" +#include "iommu_common.h" + +static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) +{ + return NULL; +} + +static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) +{ +} + +static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) +{ + return 0; +} + +static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) +{ +} + +static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +{ + return nelems; +} + +static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +{ +} + +static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) +{ +} + +static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) +{ +} + +struct pci_iommu_ops pci_sun4v_iommu_ops = { + .alloc_consistent = pci_4v_alloc_consistent, + .free_consistent = pci_4v_free_consistent, + .map_single = pci_4v_map_single, + .unmap_single = pci_4v_unmap_single, + .map_sg = pci_4v_map_sg, + .unmap_sg = pci_4v_unmap_sg, + .dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu, + .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, +}; + +void sun4v_pci_init(int node, char *model_name) +{ + prom_printf("sun4v_pci_init: Implement me.\n"); + prom_halt(); +} -- cgit v1.2.3 From bade5622167181844cd4e60087971c1f949e149f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:05:54 -0800 Subject: [SPARC64]: More SUN4V PCI controller work. Add assembler file for PCI hypervisor calls. Setup basic skeleton of SUN4V PCI controller driver. Add 32-bit devhandle to PBM struct, as this is needed for hypervisor calls. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/Makefile | 3 +- arch/sparc64/kernel/pci_sun4v.c | 286 ++++++++++++++++++++++++++++++++++++ arch/sparc64/kernel/pci_sun4v.h | 20 +++ arch/sparc64/kernel/pci_sun4v_asm.S | 56 +++++++ 4 files changed, 364 insertions(+), 1 deletion(-) create mode 100644 arch/sparc64/kernel/pci_sun4v.h create mode 100644 arch/sparc64/kernel/pci_sun4v_asm.S (limited to 'arch') diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 44043390e34..fedfd9c6729 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -14,7 +14,8 @@ obj-y := process.o setup.o cpu.o idprom.o \ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ - pci_psycho.o pci_sabre.o pci_schizo.o pci_sun4v.o + pci_psycho.o pci_sabre.o pci_schizo.o \ + pci_sun4v.o pci_sun4v_asm.o obj-$(CONFIG_SMP) += smp.o trampoline.o obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index c1a077196c5..1d61353e264 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -21,6 +21,8 @@ #include "pci_impl.h" #include "iommu_common.h" +#include "pci_sun4v.h" + static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) { return NULL; @@ -67,8 +69,292 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { .dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu, }; +/* SUN4V PCI configuration space accessors. */ + +static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 *value) +{ + /* XXX Implement me! XXX */ + return 0; +} + +static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 value) +{ + /* XXX Implement me! XXX */ + return 0; +} + +static struct pci_ops pci_sun4v_ops = { + .read = pci_sun4v_read_pci_cfg, + .write = pci_sun4v_write_pci_cfg, +}; + + +static void pci_sun4v_scan_bus(struct pci_controller_info *p) +{ + /* XXX Implement me! XXX */ +} + +static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, + struct pci_dev *pdev, + unsigned int ino) +{ + /* XXX Implement me! XXX */ + return 0; +} + +/* XXX correct? XXX */ +static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) +{ + struct pcidev_cookie *pcp = pdev->sysdata; + struct pci_pbm_info *pbm = pcp->pbm; + struct resource *res, *root; + u32 reg; + int where, size, is_64bit; + + res = &pdev->resource[resource]; + if (resource < 6) { + where = PCI_BASE_ADDRESS_0 + (resource * 4); + } else if (resource == PCI_ROM_RESOURCE) { + where = pdev->rom_base_reg; + } else { + /* Somebody might have asked allocation of a non-standard resource */ + return; + } + + is_64bit = 0; + if (res->flags & IORESOURCE_IO) + root = &pbm->io_space; + else { + root = &pbm->mem_space; + if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK) + == PCI_BASE_ADDRESS_MEM_TYPE_64) + is_64bit = 1; + } + + size = res->end - res->start; + pci_read_config_dword(pdev, where, ®); + reg = ((reg & size) | + (((u32)(res->start - root->start)) & ~size)); + if (resource == PCI_ROM_RESOURCE) { + reg |= PCI_ROM_ADDRESS_ENABLE; + res->flags |= IORESOURCE_ROM_ENABLE; + } + pci_write_config_dword(pdev, where, reg); + + /* This knows that the upper 32-bits of the address + * must be zero. Our PCI common layer enforces this. 
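+ * So for 64-bit BARs the high dword can simply be written as zero.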
+ */ + if (is_64bit) + pci_write_config_dword(pdev, where + 4, 0); +} + +/* XXX correct? XXX */ +static void pci_sun4v_resource_adjust(struct pci_dev *pdev, + struct resource *res, + struct resource *root) +{ + res->start += root->start; + res->end += root->start; +} + +/* Use ranges property to determine where PCI MEM, I/O, and Config + * space are for this PCI bus module. + */ +static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) +{ + int i, saw_cfg, saw_mem, saw_io; + + saw_cfg = saw_mem = saw_io = 0; + for (i = 0; i < pbm->num_pbm_ranges; i++) { + struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; + unsigned long a; + int type; + + type = (pr->child_phys_hi >> 24) & 0x3; + a = (((unsigned long)pr->parent_phys_hi << 32UL) | + ((unsigned long)pr->parent_phys_lo << 0UL)); + + switch (type) { + case 0: + /* PCI config space, 16MB */ + pbm->config_space = a; + saw_cfg = 1; + break; + + case 1: + /* 16-bit IO space, 16MB */ + pbm->io_space.start = a; + pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL); + pbm->io_space.flags = IORESOURCE_IO; + saw_io = 1; + break; + + case 2: + /* 32-bit MEM space, 2GB */ + pbm->mem_space.start = a; + pbm->mem_space.end = a + (0x80000000UL - 1UL); + pbm->mem_space.flags = IORESOURCE_MEM; + saw_mem = 1; + break; + + default: + break; + }; + } + + if (!saw_cfg || !saw_io || !saw_mem) { + prom_printf("%s: Fatal error, missing %s PBM range.\n", + pbm->name, + ((!saw_cfg ? + "CFG" : + (!saw_io ? + "IO" : "MEM")))); + prom_halt(); + } + + printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n", + pbm->name, + pbm->config_space, + pbm->io_space.start, + pbm->mem_space.start); +} + +static void pbm_register_toplevel_resources(struct pci_controller_info *p, + struct pci_pbm_info *pbm) +{ + pbm->io_space.name = pbm->mem_space.name = pbm->name; + + request_resource(&ioport_resource, &pbm->io_space); + request_resource(&iomem_resource, &pbm->mem_space); + pci_register_legacy_regions(&pbm->io_space, + &pbm->mem_space); +} + +static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) +{ + /* XXX Implement me! XXX */ +} + +static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node) +{ + struct pci_pbm_info *pbm; + struct linux_prom64_registers regs; + unsigned int busrange[2]; + int err; + + /* XXX */ + pbm = &p->pbm_A; + + pbm->parent = p; + pbm->prom_node = prom_node; + pbm->pci_first_slot = 1; + + prom_getproperty(prom_node, "reg", (char *)®s, sizeof(regs)); + pbm->devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; + + sprintf(pbm->name, "SUN4V-PCI%d PBM%c", + p->index, (pbm == &p->pbm_A ? 
'A' : 'B')); + + printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle); + + prom_getstring(prom_node, "name", + pbm->prom_name, sizeof(pbm->prom_name)); + + err = prom_getproperty(prom_node, "ranges", + (char *) pbm->pbm_ranges, + sizeof(pbm->pbm_ranges)); + if (err == 0 || err == -1) { + prom_printf("%s: Fatal error, no ranges property.\n", + pbm->name); + prom_halt(); + } + + pbm->num_pbm_ranges = + (err / sizeof(struct linux_prom_pci_ranges)); + + pci_sun4v_determine_mem_io_space(pbm); + pbm_register_toplevel_resources(p, pbm); + + err = prom_getproperty(prom_node, "interrupt-map", + (char *)pbm->pbm_intmap, + sizeof(pbm->pbm_intmap)); + if (err != -1) { + pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap)); + err = prom_getproperty(prom_node, "interrupt-map-mask", + (char *)&pbm->pbm_intmask, + sizeof(pbm->pbm_intmask)); + if (err == -1) { + prom_printf("%s: Fatal error, no " + "interrupt-map-mask.\n", pbm->name); + prom_halt(); + } + } else { + pbm->num_pbm_intmap = 0; + memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask)); + } + + err = prom_getproperty(prom_node, "bus-range", + (char *)&busrange[0], + sizeof(busrange)); + if (err == 0 || err == -1) { + prom_printf("%s: Fatal error, no bus-range.\n", pbm->name); + prom_halt(); + } + pbm->pci_first_busno = busrange[0]; + pbm->pci_last_busno = busrange[1]; + + pci_sun4v_iommu_init(pbm); +} + void sun4v_pci_init(int node, char *model_name) { + struct pci_controller_info *p; + struct pci_iommu *iommu; + + p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); + if (!p) { + prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); + prom_halt(); + } + memset(p, 0, sizeof(*p)); + + iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + if (!iommu) { + prom_printf("SCHIZO: Fatal memory allocation error.\n"); + prom_halt(); + } + memset(iommu, 0, sizeof(*iommu)); + p->pbm_A.iommu = iommu; + + iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + if (!iommu) { + prom_printf("SCHIZO: Fatal memory allocation error.\n"); + prom_halt(); + } + memset(iommu, 0, sizeof(*iommu)); + p->pbm_B.iommu = iommu; + + p->next = pci_controller_root; + pci_controller_root = p; + + p->index = pci_num_controllers++; + p->pbms_same_domain = 0; + + p->scan_bus = pci_sun4v_scan_bus; + p->irq_build = pci_sun4v_irq_build; + p->base_address_update = pci_sun4v_base_address_update; + p->resource_adjust = pci_sun4v_resource_adjust; + p->pci_ops = &pci_sun4v_ops; + + /* Like PSYCHO and SCHIZO we have a 2GB aligned area + * for memory space. + */ + pci_memspace_mask = 0x7fffffffUL; + + pci_sun4v_pbm_init(p, node); + prom_printf("sun4v_pci_init: Implement me.\n"); prom_halt(); } diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h new file mode 100644 index 00000000000..d3ac7ece4b3 --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v.h @@ -0,0 +1,20 @@ +/* pci_sun4v.h: SUN4V specific PCI controller support. + * + * Copyright (C) 2006 David S. 
Miller (davem@davemloft.net) + */ + +#ifndef _PCI_SUN4V_H +#define _PCI_SUN4V_H + +extern unsigned long pci_sun4v_devino_to_sysino(unsigned long devhandle, + unsigned long deino); +extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes, + unsigned long io_attributes, + unsigned long io_page_list_pa); +extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes); + +#endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S new file mode 100644 index 00000000000..fd2fe0edf16 --- /dev/null +++ b/arch/sparc64/kernel/pci_sun4v_asm.S @@ -0,0 +1,56 @@ +/* pci_sun4v_asm: Hypervisor calls for PCI support. + * + * Copyright (C) 2006 David S. Miller + */ + +#include + + /* %o0: devhandle + * %o1: devino + * + * returns %o0: sysino + */ + .globl pci_sun4v_devino_to_sysino +pci_sun4v_devino_to_sysino: + mov %o1, %o2 + mov %o0, %o1 + mov HV_FAST_INTR_DEVINO2SYSINO, %o0 + ta HV_FAST_TRAP + retl + mov %o1, %o0 + + /* %o0: devhandle + * %o1: tsbid + * %o2: num ttes + * %o3: io_attributes + * %o4: io_page_list phys address + * + * returns %o0: num ttes mapped + */ + .globl pci_sun4v_iommu_map +pci_sun4v_iommu_map: + mov %o4, %o5 + mov %o3, %o4 + mov %o2, %o3 + mov %o1, %o2 + mov %o0, %o1 + mov HV_FAST_PCI_IOMMU_MAP, %o0 + ta HV_FAST_TRAP + retl + mov %o1, %o0 + + /* %o0: devhandle + * %o1: tsbid + * %o2: num ttes + * + * returns %o0: num ttes demapped + */ + .globl pci_sun4v_iommu_demap +pci_sun4v_iommu_demap: + mov %o2, %o3 + mov %o1, %o2 + mov %o0, %o1 + mov HV_FAST_PCI_IOMMU_DEMAP, %o0 + ta HV_FAST_TRAP + retl + mov %o1, %o0 -- cgit v1.2.3 From 7eae642f75e0f7fbce7c37b2dfe0641ff1e9ebfd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:20:01 -0800 Subject: [SPARC64]: Implement SUN4V PCI config space access. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 40 +++++++++++++++++++++++++++---- arch/sparc64/kernel/pci_sun4v.h | 9 +++++++ arch/sparc64/kernel/pci_sun4v_asm.S | 48 +++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 1d61353e264..abd9bfb245c 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -74,15 +74,47 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { - /* XXX Implement me! XXX */ - return 0; + struct pci_pbm_info *pbm = bus_dev->sysdata; + unsigned long devhandle = pbm->devhandle; + unsigned int bus = bus_dev->number; + unsigned int device = PCI_SLOT(devfn); + unsigned int func = PCI_FUNC(devfn); + unsigned long ret; + + ret = pci_sun4v_config_get(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size); + switch (size) { + case 1: + *value = ret & 0xff; + break; + case 2: + *value = ret & 0xffff; + break; + case 4: + *value = ret & 0xffffffff; + break; + }; + + + return PCIBIOS_SUCCESSFUL; } static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { - /* XXX Implement me! 
XXX */ - return 0; + struct pci_pbm_info *pbm = bus_dev->sysdata; + unsigned long devhandle = pbm->devhandle; + unsigned int bus = bus_dev->number; + unsigned int device = PCI_SLOT(devfn); + unsigned int func = PCI_FUNC(devfn); + unsigned long ret; + + ret = pci_sun4v_config_put(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value); + + return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_sun4v_ops = { diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h index d3ac7ece4b3..5c7ed2ca150 100644 --- a/arch/sparc64/kernel/pci_sun4v.h +++ b/arch/sparc64/kernel/pci_sun4v.h @@ -16,5 +16,14 @@ extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle, extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, unsigned long tsbid, unsigned long num_ttes); +extern unsigned long pci_sun4v_config_get(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size); +extern int pci_sun4v_config_put(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size, + unsigned long data); #endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S index fd2fe0edf16..2f1147146ab 100644 --- a/arch/sparc64/kernel/pci_sun4v_asm.S +++ b/arch/sparc64/kernel/pci_sun4v_asm.S @@ -54,3 +54,51 @@ pci_sun4v_iommu_demap: ta HV_FAST_TRAP retl mov %o1, %o0 + + /* %o0: devhandle + * %o1: pci_device + * %o2: pci_config_offset + * %o3: size + * + * returns %o0: data + * + * If there is an error, the data will be returned + * as all 1's. + */ + .globl pci_sun4v_config_get +pci_sun4v_config_get: + mov %o3, %o4 + mov %o2, %o3 + mov %o1, %o2 + mov %o0, %o1 + mov HV_FAST_PCI_CONFIG_GET, %o0 + ta HV_FAST_TRAP + brnz,a,pn %o1, 1f + mov -1, %o2 +1: retl + mov %o2, %o0 + + /* %o0: devhandle + * %o1: pci_device + * %o2: pci_config_offset + * %o3: size + * %o4: data + * + * returns %o0: status + * + * status will be zero if the operation completed + * successfully, else -1 if not + */ + .globl pci_sun4v_config_put +pci_sun4v_config_put: + mov %o3, %o4 + mov %o2, %o3 + mov %o1, %o2 + mov %o0, %o1 + mov HV_FAST_PCI_CONFIG_PUT, %o0 + ta HV_FAST_TRAP + brnz,a,pn %o1, 1f + mov -1, %o1 +1: retl + mov %o1, %o0 + -- cgit v1.2.3 From 164c220fa3947abbada65329d168f421b461a2a7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:57:21 -0800 Subject: [SPARC64]: Fix hypervisor call arg passing. Function goes in %o5, args go in %o0 --> %o5. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/irq.c | 8 +++--- arch/sparc64/kernel/smp.c | 16 ++++++------ arch/sparc64/kernel/trampoline.S | 56 ++++++++++++++++++++-------------------- arch/sparc64/kernel/tsb.S | 6 ++--- arch/sparc64/mm/init.c | 20 +++++++------- 5 files changed, 53 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 1f6455503f2..c5dd6daf127 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -863,10 +863,10 @@ void init_irqwork_curcpu(void) static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) { - register unsigned long func __asm__("%o0"); - register unsigned long arg0 __asm__("%o1"); - register unsigned long arg1 __asm__("%o2"); - register unsigned long arg2 __asm__("%o3"); + register unsigned long func __asm__("%o5"); + register unsigned long arg0 __asm__("%o0"); + register unsigned long arg1 __asm__("%o1"); + register unsigned long arg2 __asm__("%o2"); unsigned long page = get_zeroed_page(GFP_ATOMIC); if (!page) { diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index c10a3a8639e..f553264588d 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -572,10 +572,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t retries = 0; cnt = init_cpu_list(cpu_list, mask); do { - register unsigned long func __asm__("%o0"); - register unsigned long arg0 __asm__("%o1"); - register unsigned long arg1 __asm__("%o2"); - register unsigned long arg2 __asm__("%o3"); + register unsigned long func __asm__("%o5"); + register unsigned long arg0 __asm__("%o0"); + register unsigned long arg1 __asm__("%o1"); + register unsigned long arg2 __asm__("%o2"); func = HV_FAST_CPU_MONDO_SEND; arg0 = cnt; @@ -624,10 +624,10 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t int retries = 0; do { - register unsigned long func __asm__("%o0"); - register unsigned long arg0 __asm__("%o1"); - register unsigned long arg1 __asm__("%o2"); - register unsigned long arg2 __asm__("%o3"); + register unsigned long func __asm__("%o5"); + register unsigned long arg0 __asm__("%o0"); + register unsigned long arg1 __asm__("%o1"); + register unsigned long arg2 __asm__("%o2"); cpu_list[0] = i; func = HV_FAST_CPU_MONDO_SEND; diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index ffa8b79632c..c476f5b321f 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -265,20 +265,20 @@ do_unlock: nop niagara_lock_tlb: - mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 - sethi %hi(KERNBASE), %o1 - clr %o2 - sethi %hi(kern_locked_tte_data), %o3 - ldx [%o3 + %lo(kern_locked_tte_data)], %o3 - mov HV_MMU_IMMU, %o4 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sethi %hi(KERNBASE), %o0 + clr %o1 + sethi %hi(kern_locked_tte_data), %o2 + ldx [%o2 + %lo(kern_locked_tte_data)], %o2 + mov HV_MMU_IMMU, %o3 ta HV_FAST_TRAP - mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 - sethi %hi(KERNBASE), %o1 - clr %o2 - sethi %hi(kern_locked_tte_data), %o3 - ldx [%o3 + %lo(kern_locked_tte_data)], %o3 - mov HV_MMU_DMMU, %o4 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sethi %hi(KERNBASE), %o0 + clr %o1 + sethi %hi(kern_locked_tte_data), %o2 + ldx [%o2 + %lo(kern_locked_tte_data)], %o2 + mov HV_MMU_DMMU, %o3 ta HV_FAST_TRAP sethi %hi(bigkernel), %g2 @@ -286,24 +286,24 @@ niagara_lock_tlb: brz,pt %g2, after_lock_tlb nop - mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 - sethi %hi(KERNBASE + 0x400000), %o1 - clr %o2 - sethi 
%hi(kern_locked_tte_data), %o3 - ldx [%o3 + %lo(kern_locked_tte_data)], %o3 - sethi %hi(0x400000), %o4 - add %o3, %o4, %o3 - mov HV_MMU_IMMU, %o4 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sethi %hi(KERNBASE + 0x400000), %o0 + clr %o1 + sethi %hi(kern_locked_tte_data), %o2 + ldx [%o2 + %lo(kern_locked_tte_data)], %o2 + sethi %hi(0x400000), %o3 + add %o2, %o3, %o2 + mov HV_MMU_IMMU, %o3 ta HV_FAST_TRAP - mov HV_FAST_MMU_MAP_PERM_ADDR, %o0 - sethi %hi(KERNBASE + 0x400000), %o1 - clr %o2 - sethi %hi(kern_locked_tte_data), %o3 - ldx [%o3 + %lo(kern_locked_tte_data)], %o3 - sethi %hi(0x400000), %o4 - add %o3, %o4, %o3 - mov HV_MMU_DMMU, %o4 + mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 + sethi %hi(KERNBASE + 0x400000), %o0 + clr %o1 + sethi %hi(kern_locked_tte_data), %o2 + ldx [%o2 + %lo(kern_locked_tte_data)], %o2 + sethi %hi(0x400000), %o3 + add %o2, %o3, %o2 + mov HV_MMU_DMMU, %o3 ta HV_FAST_TRAP after_lock_tlb: diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index a53ec6fb769..8a9351258af 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -266,9 +266,9 @@ __tsb_context_switch: mov SCRATCHPAD_UTSBREG2, %g1 stxa %g2, [%g1] ASI_SCRATCHPAD - mov HV_FAST_MMU_TSB_CTXNON0, %o0 - mov 1, %o1 - mov %o4, %o2 + mov HV_FAST_MMU_TSB_CTXNON0, %o5 + mov 1, %o0 + mov %o4, %o1 ta HV_FAST_TRAP ba,pt %xcc, 9f diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e602b857071..7faba33202a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { - register unsigned long func asm("%o0"); - register unsigned long arg0 asm("%o1"); - register unsigned long arg1 asm("%o2"); - register unsigned long arg2 asm("%o3"); - register unsigned long arg3 asm("%o4"); + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + register unsigned long arg2 asm("%o2"); + register unsigned long arg3 asm("%o3"); func = HV_FAST_MMU_MAP_PERM_ADDR; arg0 = vaddr; @@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void) /* Register this cpu's fault status area with the hypervisor. */ void __cpuinit sun4v_register_fault_status(void) { + register unsigned long func asm("%o5"); register unsigned long arg0 asm("%o0"); - register unsigned long arg1 asm("%o1"); int cpu = hard_smp_processor_id(); struct trap_per_cpu *tb = &trap_block[cpu]; unsigned long pa; pa = kern_base + ((unsigned long) tb - KERNBASE); - arg0 = HV_FAST_MMU_FAULT_AREA_CONF; - arg1 = pa; + func = HV_FAST_MMU_FAULT_AREA_CONF; + arg0 = pa; __asm__ __volatile__("ta %4" - : "=&r" (arg0), "=&r" (arg1) - : "0" (arg0), "1" (arg1), + : "=&r" (func), "=&r" (arg0) + : "0" (func), "1" (arg0), "i" (HV_FAST_TRAP)); } -- cgit v1.2.3 From 18397944642cbca7fcd4a109b43ed5b4652e95b9 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 10 Feb 2006 00:08:26 -0800 Subject: [SPARC64]: First cut at SUN4V PCI IOMMU handling. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_iommu.c | 6 +- arch/sparc64/kernel/pci_sun4v.c | 525 +++++++++++++++++++++++++++++++++++- arch/sparc64/kernel/pci_sun4v.h | 4 + arch/sparc64/kernel/pci_sun4v_asm.S | 46 ++-- 4 files changed, 550 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 8e52232f6f3..c9320eac45d 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -562,9 +562,9 @@ static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n /* Fast path single entry scatterlists. */ if (nelems == 1) { sglist->dma_address = - pci_map_single(pdev, - (page_address(sglist->page) + sglist->offset), - sglist->length, direction); + pci_4u_map_single(pdev, + (page_address(sglist->page) + sglist->offset), + sglist->length, direction); if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) return 0; sglist->dma_length = sglist->length; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index abd9bfb245c..3f0e3c09f4d 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -23,39 +24,481 @@ #include "pci_sun4v.h" +#define PGLIST_NENTS 2048 + +struct sun4v_pglist { + u64 pglist[PGLIST_NENTS]; +}; + +static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists); + +static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) +{ + unsigned long n, i, start, end, limit; + int pass; + + limit = arena->limit; + start = arena->hint; + pass = 0; + +again: + n = find_next_zero_bit(arena->map, limit, start); + end = n + npages; + if (unlikely(end >= limit)) { + if (likely(pass < 1)) { + limit = start; + start = 0; + pass++; + goto again; + } else { + /* Scanned the whole thing, give up. 
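+ * (both passes, hint to limit and then 0 back up to the old hint, have failed).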
*/ + return -1; + } + } + + for (i = n; i < end; i++) { + if (test_bit(i, arena->map)) { + start = i + 1; + goto again; + } + } + + for (i = n; i < end; i++) + __set_bit(i, arena->map); + + arena->hint = end; + + return n; +} + +static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) +{ + unsigned long i; + + for (i = base; i < (base + npages); i++) + __clear_bit(i, arena->map); +} + static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) { - return NULL; + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long devhandle, flags, order, first_page, npages, n; + void *ret; + long entry; + u64 *pglist; + int cpu; + + size = IO_PAGE_ALIGN(size); + order = get_order(size); + if (order >= MAX_ORDER) + return NULL; + + npages = size >> IO_PAGE_SHIFT; + if (npages > PGLIST_NENTS) + return NULL; + + first_page = __get_free_pages(GFP_ATOMIC, order); + if (first_page == 0UL) + return NULL; + memset((char *)first_page, 0, PAGE_SIZE << order); + + pcp = pdev->sysdata; + devhandle = pcp->pbm->devhandle; + iommu = pcp->pbm->iommu; + + spin_lock_irqsave(&iommu->lock, flags); + entry = pci_arena_alloc(&iommu->arena, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + + if (unlikely(entry < 0L)) { + free_pages(first_page, order); + return NULL; + } + + *dma_addrp = (iommu->page_table_map_base + + (entry << IO_PAGE_SHIFT)); + ret = (void *) first_page; + first_page = __pa(first_page); + + cpu = get_cpu(); + + pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + for (n = 0; n < npages; n++) + pglist[n] = first_page + (n * PAGE_SIZE); + + do { + unsigned long num; + + num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), + npages, + (HV_PCI_MAP_ATTR_READ | + HV_PCI_MAP_ATTR_WRITE), + __pa(pglist)); + entry += num; + npages -= num; + pglist += num; + } while (npages != 0); + + put_cpu(); + + return ret; } static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) { + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long flags, order, npages, entry, devhandle; + + npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; + pcp = pdev->sysdata; + iommu = pcp->pbm->iommu; + devhandle = pcp->pbm->devhandle; + entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); + + spin_lock_irqsave(&iommu->lock, flags); + + pci_arena_free(&iommu->arena, entry, npages); + + do { + unsigned long num; + + num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), + npages); + entry += num; + npages -= num; + } while (npages != 0); + + spin_unlock_irqrestore(&iommu->lock, flags); + + order = get_order(size); + if (order < 10) + free_pages((unsigned long)cpu, order); } static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction) { - return 0; + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long flags, npages, oaddr; + unsigned long i, base_paddr, devhandle; + u32 bus_addr, ret; + unsigned long prot; + long entry; + u64 *pglist; + int cpu; + + pcp = pdev->sysdata; + iommu = pcp->pbm->iommu; + devhandle = pcp->pbm->devhandle; + + if (unlikely(direction == PCI_DMA_NONE)) + goto bad; + + oaddr = (unsigned long)ptr; + npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + if (unlikely(npages > PGLIST_NENTS)) + goto bad; + + spin_lock_irqsave(&iommu->lock, flags); + entry = pci_arena_alloc(&iommu->arena, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + + if (unlikely(entry < 0L)) 
+ goto bad; + + bus_addr = (iommu->page_table_map_base + + (entry << IO_PAGE_SHIFT)); + ret = bus_addr | (oaddr & ~IO_PAGE_MASK); + base_paddr = __pa(oaddr & IO_PAGE_MASK); + prot = HV_PCI_MAP_ATTR_READ; + if (direction != PCI_DMA_TODEVICE) + prot |= HV_PCI_MAP_ATTR_WRITE; + + cpu = get_cpu(); + + pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) + pglist[i] = base_paddr; + + do { + unsigned long num; + + num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), + npages, prot, + __pa(pglist)); + entry += num; + npages -= num; + pglist += num; + } while (npages != 0); + + put_cpu(); + + return ret; + +bad: + if (printk_ratelimit()) + WARN_ON(1); + return PCI_DMA_ERROR_CODE; } static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long flags, npages, devhandle; + long entry; + + if (unlikely(direction == PCI_DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + return; + } + + pcp = pdev->sysdata; + iommu = pcp->pbm->iommu; + devhandle = pcp->pbm->devhandle; + + npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); + npages >>= IO_PAGE_SHIFT; + bus_addr &= IO_PAGE_MASK; + + spin_lock_irqsave(&iommu->lock, flags); + + entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; + pci_arena_free(&iommu->arena, entry, npages); + + do { + unsigned long num; + + num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), + npages); + entry += num; + npages -= num; + } while (npages != 0); + + spin_unlock_irqrestore(&iommu->lock, flags); +} + +#define SG_ENT_PHYS_ADDRESS(SG) \ + (__pa(page_address((SG)->page)) + (SG)->offset) + +static inline void fill_sg(long entry, unsigned long devhandle, + struct scatterlist *sg, + int nused, int nelems, unsigned long prot) +{ + struct scatterlist *dma_sg = sg; + struct scatterlist *sg_end = sg + nelems; + int i, cpu, pglist_ent; + u64 *pglist; + + cpu = get_cpu(); + pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + pglist_ent = 0; + for (i = 0; i < nused; i++) { + unsigned long pteval = ~0UL; + u32 dma_npages; + + dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) + + dma_sg->dma_length + + ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT; + do { + unsigned long offset; + signed int len; + + /* If we are here, we know we have at least one + * more page to map. So walk forward until we + * hit a page crossing, and begin creating new + * mappings from that spot. + */ + for (;;) { + unsigned long tmp; + + tmp = SG_ENT_PHYS_ADDRESS(sg); + len = sg->length; + if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) { + pteval = tmp & IO_PAGE_MASK; + offset = tmp & (IO_PAGE_SIZE - 1UL); + break; + } + if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) { + pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK; + offset = 0UL; + len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL))); + break; + } + sg++; + } + + pteval = (pteval & IOPTE_PAGE); + while (len > 0) { + pglist[pglist_ent++] = pteval; + pteval += IO_PAGE_SIZE; + len -= (IO_PAGE_SIZE - offset); + offset = 0; + dma_npages--; + } + + pteval = (pteval & IOPTE_PAGE) + len; + sg++; + + /* Skip over any tail mappings we've fully mapped, + * adjusting pteval along the way. Stop when we + * detect a page crossing event. 
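+ * Physically contiguous entries are thereby coalesced into a single mapping run.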
+ */ + while (sg < sg_end && + (pteval << (64 - IO_PAGE_SHIFT)) != 0UL && + (pteval == SG_ENT_PHYS_ADDRESS(sg)) && + ((pteval ^ + (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) { + pteval += sg->length; + sg++; + } + if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL) + pteval = ~0UL; + } while (dma_npages != 0); + dma_sg++; + } + + BUG_ON(pglist_ent == 0); + + do { + unsigned long num; + + num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), + pglist_ent); + entry += num; + pglist_ent -= num; + } while (pglist_ent != 0); + + put_cpu(); } static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { - return nelems; + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long flags, npages, prot, devhandle; + u32 dma_base; + struct scatterlist *sgtmp; + long entry; + int used; + + /* Fast path single entry scatterlists. */ + if (nelems == 1) { + sglist->dma_address = + pci_4v_map_single(pdev, + (page_address(sglist->page) + sglist->offset), + sglist->length, direction); + if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) + return 0; + sglist->dma_length = sglist->length; + return 1; + } + + pcp = pdev->sysdata; + iommu = pcp->pbm->iommu; + devhandle = pcp->pbm->devhandle; + + if (unlikely(direction == PCI_DMA_NONE)) + goto bad; + + /* Step 1: Prepare scatter list. */ + npages = prepare_sg(sglist, nelems); + if (unlikely(npages > PGLIST_NENTS)) + goto bad; + + /* Step 2: Allocate a cluster and context, if necessary. */ + spin_lock_irqsave(&iommu->lock, flags); + entry = pci_arena_alloc(&iommu->arena, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + + if (unlikely(entry < 0L)) + goto bad; + + dma_base = iommu->page_table_map_base + + (entry << IO_PAGE_SHIFT); + + /* Step 3: Normalize DMA addresses. */ + used = nelems; + + sgtmp = sglist; + while (used && sgtmp->dma_length) { + sgtmp->dma_address += dma_base; + sgtmp++; + used--; + } + used = nelems - used; + + /* Step 4: Create the mappings. */ + prot = HV_PCI_MAP_ATTR_READ; + if (direction != PCI_DMA_TODEVICE) + prot |= HV_PCI_MAP_ATTR_WRITE; + + fill_sg(entry, devhandle, sglist, used, nelems, prot); + + return used; + +bad: + if (printk_ratelimit()) + WARN_ON(1); + return 0; } static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { + struct pcidev_cookie *pcp; + struct pci_iommu *iommu; + unsigned long flags, i, npages, devhandle; + long entry; + u32 bus_addr; + + if (unlikely(direction == PCI_DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + } + + pcp = pdev->sysdata; + iommu = pcp->pbm->iommu; + devhandle = pcp->pbm->devhandle; + + bus_addr = sglist->dma_address & IO_PAGE_MASK; + + for (i = 1; i < nelems; i++) + if (sglist[i].dma_length == 0) + break; + i--; + npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - + bus_addr) >> IO_PAGE_SHIFT; + + entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); + + spin_lock_irqsave(&iommu->lock, flags); + + pci_arena_free(&iommu->arena, entry, npages); + + do { + unsigned long num; + + num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), + npages); + entry += num; + npages -= num; + } while (npages != 0); + + spin_unlock_irqrestore(&iommu->lock, flags); } static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) { + /* Nothing to do... 
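+ * Unlike sun4u there is no streaming buffer to flush; the hypervisor-managed IOMMU is coherent.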
*/ } static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) { + /* Nothing to do... */ } struct pci_iommu_ops pci_sun4v_iommu_ops = { @@ -264,9 +707,83 @@ static void pbm_register_toplevel_resources(struct pci_controller_info *p, &pbm->mem_space); } +static void probe_existing_entries(struct pci_pbm_info *pbm, + struct pci_iommu *iommu) +{ + struct pci_iommu_arena *arena = &iommu->arena; + unsigned long i, devhandle; + + devhandle = pbm->devhandle; + for (i = 0; i < arena->limit; i++) { + unsigned long ret, io_attrs, ra; + + ret = pci_sun4v_iommu_getmap(devhandle, + HV_PCI_TSBID(0, i), + &io_attrs, &ra); + if (ret == HV_EOK) + __set_bit(i, arena->map); + } +} + static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { - /* XXX Implement me! XXX */ + struct pci_iommu *iommu = pbm->iommu; + unsigned long num_tsb_entries, sz; + u32 vdma[2], dma_mask, dma_offset; + int err, tsbsize; + + err = prom_getproperty(pbm->prom_node, "virtual-dma", + (char *)&vdma[0], sizeof(vdma)); + if (err == 0 || err == -1) { + /* No property, use default values. */ + vdma[0] = 0x80000000; + vdma[1] = 0x80000000; + } + + dma_mask = vdma[0]; + switch (vdma[1]) { + case 0x20000000: + dma_mask |= 0x1fffffff; + tsbsize = 64; + break; + + case 0x40000000: + dma_mask |= 0x3fffffff; + tsbsize = 128; + break; + + case 0x80000000: + dma_mask |= 0x7fffffff; + tsbsize = 128; + break; + + default: + prom_printf("PCI-SUN4V: strange virtual-dma size.\n"); + prom_halt(); + }; + + num_tsb_entries = tsbsize / sizeof(iopte_t); + + dma_offset = vdma[0]; + + /* Setup initial software IOMMU state. */ + spin_lock_init(&iommu->lock); + iommu->ctx_lowest_free = 1; + iommu->page_table_map_base = dma_offset; + iommu->dma_addr_mask = dma_mask; + + /* Allocate and initialize the free area map. 
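+ * One bit per IOMMU TSB entry, rounded up to an 8-byte boundary.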
*/ + sz = num_tsb_entries / 8; + sz = (sz + 7UL) & ~7UL; + iommu->arena.map = kmalloc(sz, GFP_KERNEL); + if (!iommu->arena.map) { + prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); + prom_halt(); + } + memset(iommu->arena.map, 0, sz); + iommu->arena.limit = num_tsb_entries; + + probe_existing_entries(pbm, iommu); } static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node) diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h index 5c7ed2ca150..00322ed0cf8 100644 --- a/arch/sparc64/kernel/pci_sun4v.h +++ b/arch/sparc64/kernel/pci_sun4v.h @@ -16,6 +16,10 @@ extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle, extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, unsigned long tsbid, unsigned long num_ttes); +extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, + unsigned long tsbid, + unsigned long *io_attributes, + unsigned long *real_address); extern unsigned long pci_sun4v_config_get(unsigned long devhandle, unsigned long pci_device, unsigned long config_offset, diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S index 2f1147146ab..4a12341dd5d 100644 --- a/arch/sparc64/kernel/pci_sun4v_asm.S +++ b/arch/sparc64/kernel/pci_sun4v_asm.S @@ -12,9 +12,7 @@ */ .globl pci_sun4v_devino_to_sysino pci_sun4v_devino_to_sysino: - mov %o1, %o2 - mov %o0, %o1 - mov HV_FAST_INTR_DEVINO2SYSINO, %o0 + mov HV_FAST_INTR_DEVINO2SYSINO, %o5 ta HV_FAST_TRAP retl mov %o1, %o0 @@ -29,12 +27,7 @@ pci_sun4v_devino_to_sysino: */ .globl pci_sun4v_iommu_map pci_sun4v_iommu_map: - mov %o4, %o5 - mov %o3, %o4 - mov %o2, %o3 - mov %o1, %o2 - mov %o0, %o1 - mov HV_FAST_PCI_IOMMU_MAP, %o0 + mov HV_FAST_PCI_IOMMU_MAP, %o5 ta HV_FAST_TRAP retl mov %o1, %o0 @@ -47,14 +40,28 @@ pci_sun4v_iommu_map: */ .globl pci_sun4v_iommu_demap pci_sun4v_iommu_demap: - mov %o2, %o3 - mov %o1, %o2 - mov %o0, %o1 - mov HV_FAST_PCI_IOMMU_DEMAP, %o0 + mov HV_FAST_PCI_IOMMU_DEMAP, %o5 ta HV_FAST_TRAP retl mov %o1, %o0 + /* %o0: devhandle + * %o1: tsbid + * %o2: &io_attributes + * %o3: &real_address + * + * returns %o0: status + */ + .globl pci_sun4v_iommu_getmap +pci_sun4v_iommu_getmap: + mov %o2, %o4 + mov HV_FAST_PCI_IOMMU_GETMAP, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + stx %o2, [%o3] + retl + mov %o0, %o0 + /* %o0: devhandle * %o1: pci_device * %o2: pci_config_offset @@ -67,11 +74,7 @@ pci_sun4v_iommu_demap: */ .globl pci_sun4v_config_get pci_sun4v_config_get: - mov %o3, %o4 - mov %o2, %o3 - mov %o1, %o2 - mov %o0, %o1 - mov HV_FAST_PCI_CONFIG_GET, %o0 + mov HV_FAST_PCI_CONFIG_GET, %o5 ta HV_FAST_TRAP brnz,a,pn %o1, 1f mov -1, %o2 @@ -91,14 +94,9 @@ pci_sun4v_config_get: */ .globl pci_sun4v_config_put pci_sun4v_config_put: - mov %o3, %o4 - mov %o2, %o3 - mov %o1, %o2 - mov %o0, %o1 - mov HV_FAST_PCI_CONFIG_PUT, %o0 + mov HV_FAST_PCI_CONFIG_PUT, %o5 ta HV_FAST_TRAP brnz,a,pn %o1, 1f mov -1, %o1 1: retl mov %o1, %o0 - -- cgit v1.2.3 From 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 10 Feb 2006 15:39:51 -0800 Subject: [SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly. This is where the virtual address of the fault status area belongs. To set it up we don't make a hypervisor call, instead we call OBP's SUNW,set-trap-table with the real address of the fault status area as the second argument. And right before that call we write the virtual address into ASI_SCRATCHPAD vaddr 0x0. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/head.S | 29 ++++++- arch/sparc64/kernel/sun4v_ivec.S | 28 ++---- arch/sparc64/kernel/sun4v_tlb_miss.S | 162 ++++++++++------------------------- arch/sparc64/kernel/trampoline.S | 29 ++++++- arch/sparc64/mm/init.c | 22 +---- arch/sparc64/prom/misc.c | 5 ++ 6 files changed, 113 insertions(+), 162 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index d048f0dfd42..f581f0e917f 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -521,11 +521,36 @@ setup_trap_table: wrpr %g0, 15, %pil /* Make the firmware call to jump over to the Linux trap table. */ - call prom_set_trap_table + sethi %hi(is_sun4v), %o0 + lduw [%o0 + %lo(is_sun4v)], %o0 + brz,pt %o0, 1f + nop + + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + stxa %g2, [%g0] ASI_SCRATCHPAD + + /* Compute physical address: + * + * paddr = kern_base + (mmfsa_vaddr - KERNBASE) + */ + sethi %hi(KERNBASE), %g3 + sub %g2, %g3, %g2 + sethi %hi(kern_base), %g3 + ldx [%g3 + %lo(kern_base)], %g3 + add %g2, %g3, %o1 + + call prom_set_trap_table_sun4v + sethi %hi(sparc64_ttable_tl0), %o0 + + ba,pt %xcc, 2f + nop + +1: call prom_set_trap_table sethi %hi(sparc64_ttable_tl0), %o0 /* Start using proper page size encodings in ctx register. */ - sethi %hi(sparc64_kern_pri_context), %g3 +2: sethi %hi(sparc64_kern_pri_context), %g3 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 mov PRIMARY_CONTEXT, %g1 diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S index d9d442017d3..c0367ef7e09 100644 --- a/arch/sparc64/kernel/sun4v_ivec.S +++ b/arch/sparc64/kernel/sun4v_ivec.S @@ -22,11 +22,8 @@ sun4v_cpu_mondo: nop /* Get &trap_block[smp_processor_id()] into %g3. */ - __GET_CPUID(%g1) - sethi %hi(trap_block), %g3 - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 - or %g3, %lo(trap_block), %g3 - add %g3, %g7, %g3 + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 /* Get CPU mondo queue base phys address into %g7. */ ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 @@ -74,11 +71,8 @@ sun4v_dev_mondo: nop /* Get &trap_block[smp_processor_id()] into %g3. */ - __GET_CPUID(%g1) - sethi %hi(trap_block), %g3 - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 - or %g3, %lo(trap_block), %g3 - add %g3, %g7, %g3 + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 /* Get DEV mondo queue base phys address into %g5. */ ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 @@ -143,11 +137,8 @@ sun4v_res_mondo: nop /* Get &trap_block[smp_processor_id()] into %g3. */ - __GET_CPUID(%g1) - sethi %hi(trap_block), %g3 - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 - or %g3, %lo(trap_block), %g3 - add %g3, %g7, %g3 + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 /* Get RES mondo queue base phys address into %g5. */ ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5 @@ -251,11 +242,8 @@ sun4v_nonres_mondo: nop /* Get &trap_block[smp_processor_id()] into %g3. */ - __GET_CPUID(%g1) - sethi %hi(trap_block), %g3 - sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 - or %g3, %lo(trap_block), %g3 - add %g3, %g7, %g3 + ldxa [%g0] ASI_SCRATCHPAD, %g3 + sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 /* Get RES mondo queue base phys address into %g5. 
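 * (Despite the wording this is the non-resumable error queue, hence TRAP_PER_CPU_NONRESUM_MONDO_PA below.)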
*/ ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5 diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index c408b05a5f0..f6222623de3 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -7,26 +7,20 @@ .align 32 sun4v_itlb_miss: - /* Load CPU ID into %g3. */ - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 + /* Load MMU Miss base into %g2. */ + ldxa [%g0] ASI_SCRATCHPAD, %g3 /* Load UTSB reg into %g1. */ - ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 - - /* Load &trap_block[smp_processor_id()] into %g2. */ - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. * Branch if kernel TLB miss. The kernel TSB and user TSB miss * code wants the missing virtual address in %g4, so that value * cannot be modified through the entirety of this handler. */ - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 srlx %g4, 22, %g3 sllx %g5, 48, %g6 or %g6, %g3, %g6 @@ -90,26 +84,20 @@ sun4v_itlb_load: retry sun4v_dtlb_miss: - /* Load CPU ID into %g3. */ - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 + /* Load MMU Miss base into %g2. */ + ldxa [%g0] ASI_SCRATCHPAD, %g2 /* Load UTSB reg into %g1. */ + mov SCRATCHPAD_UTSBREG1, %g1 ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 - /* Load &trap_block[smp_processor_id()] into %g2. */ - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6. * Branch if kernel TLB miss. The kernel TSB and user TSB miss * code wants the missing virtual address in %g4, so that value * cannot be modified through the entirety of this handler. */ - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 srlx %g4, 22, %g3 sllx %g5, 48, %g6 or %g6, %g3, %g6 @@ -169,17 +157,10 @@ sun4v_dtlb_load: retry sun4v_dtlb_prot: - /* Load CPU ID into %g3. */ - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 + /* Load MMU Miss base into %g2. */ + ldxa [%g0] ASI_SCRATCHPAD, %g2 - /* Load &trap_block[smp_processor_id()] into %g2. */ - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 rdpr %tl, %g1 cmp %g1, 1 bgu,pn %xcc, winfix_trampoline @@ -187,35 +168,17 @@ sun4v_dtlb_prot: ba,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 - /* Called from trap table with &trap_block[smp_processor_id()] in - * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1. + /* Called from trap table with TAG TARGET placed into + * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. 
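+ * (it no longer needs to be recomputed here).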
*/ sun4v_itsb_miss: - ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 - ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 - - srlx %g4, 22, %g7 - sllx %g5, 48, %g6 - or %g6, %g7, %g6 - brz,pn %g5, kvmap_itlb_4v - nop - ba,pt %xcc, sun4v_tsb_miss_common mov FAULT_CODE_ITLB, %g3 - /* Called from trap table with &trap_block[smp_processor_id()] in - * %g5 and SCRATCHPAD_UTSBREG1 contents in %g1. + /* Called from trap table with TAG TARGET placed into + * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. */ sun4v_dtsb_miss: - ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g5 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 - - srlx %g4, 22, %g7 - sllx %g5, 48, %g6 - or %g6, %g7, %g6 - brz,pn %g5, kvmap_dtlb_4v - nop - mov FAULT_CODE_DTLB, %g3 /* Create TSB pointer into %g1. This is something like: @@ -239,15 +202,10 @@ sun4v_tsb_miss_common: /* Instruction Access Exception, tl0. */ sun4v_iacc: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etrap @@ -260,15 +218,10 @@ sun4v_iacc: /* Instruction Access Exception, tl1. */ sun4v_iacc_tl1: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etraptl1 @@ -281,15 +234,10 @@ sun4v_iacc_tl1: /* Data Access Exception, tl0. */ sun4v_dacc: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etrap @@ -302,15 +250,10 @@ sun4v_dacc: /* Data Access Exception, tl1. 
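 * Identical fault decoding to the tl0 case, but it traps through etraptl1.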
*/ sun4v_dacc_tl1: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etraptl1 @@ -323,15 +266,10 @@ sun4v_dacc_tl1: /* Memory Address Unaligned. */ sun4v_mna: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 + ldxa [%g0] ASI_SCRATCHPAD, %g2 mov HV_FAULT_TYPE_UNALIGNED, %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 @@ -359,15 +297,10 @@ sun4v_privact: /* Unaligned ldd float, tl0. */ sun4v_lddfmna: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etrap @@ -380,15 +313,10 @@ sun4v_lddfmna: /* Unaligned std float, tl0. 
*/ sun4v_stdfmna: - mov SCRATCHPAD_CPUID, %g1 - ldxa [%g1] ASI_SCRATCHPAD, %g3 - sethi %hi(trap_block), %g2 - or %g2, %lo(trap_block), %g2 - sllx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 - add %g2, %g3, %g2 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_TYPE_OFFSET], %g3 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 ba,pt %xcc, etrap diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index c476f5b321f..88382200c7b 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -389,10 +389,35 @@ after_lock_tlb: or %o1, PSTATE_IE, %o1 wrpr %o1, 0, %pstate - call prom_set_trap_table + sethi %hi(is_sun4v), %o0 + lduw [%o0 + %lo(is_sun4v)], %o0 + brz,pt %o0, 1f + nop + + TRAP_LOAD_TRAP_BLOCK(%g2, %g3) + add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + stxa %g2, [%g0] ASI_SCRATCHPAD + + /* Compute physical address: + * + * paddr = kern_base + (mmfsa_vaddr - KERNBASE) + */ + sethi %hi(KERNBASE), %g3 + sub %g2, %g3, %g2 + sethi %hi(kern_base), %g3 + ldx [%g3 + %lo(kern_base)], %g3 + add %g2, %g3, %o1 + + call prom_set_trap_table_sun4v + sethi %hi(sparc64_ttable_tl0), %o0 + + ba,pt %xcc, 2f + nop + +1: call prom_set_trap_table sethi %hi(sparc64_ttable_tl0), %o0 - call smp_callin +2: call smp_callin nop call cpu_idle mov 0, %o0 diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7faba33202a..88eb6f6be56 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,24 +1109,6 @@ static void __init tsb_phys_patch(void) } } -/* Register this cpu's fault status area with the hypervisor. */ -void __cpuinit sun4v_register_fault_status(void) -{ - register unsigned long func asm("%o5"); - register unsigned long arg0 asm("%o0"); - int cpu = hard_smp_processor_id(); - struct trap_per_cpu *tb = &trap_block[cpu]; - unsigned long pa; - - pa = kern_base + ((unsigned long) tb - KERNBASE); - func = HV_FAST_MMU_FAULT_AREA_CONF; - arg0 = pa; - __asm__ __volatile__("ta %4" - : "=&r" (func), "=&r" (arg0) - : "0" (func), "1" (arg0), - "i" (HV_FAST_TRAP)); -} - /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1147,10 +1129,8 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) { + if (tlb_type == hypervisor) sun4v_patch_tlb_handlers(); - sun4v_register_fault_status(); - } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 87f5cfce23b..713cbac5f9b 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c @@ -136,6 +136,11 @@ void prom_set_trap_table(unsigned long tba) p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); } +void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa) +{ + p1275_cmd("SUNW,set-trap-table", P1275_INOUT(2, 0), tba, mmfsa); +} + int prom_get_mmu_ihandle(void) { int node, ret; -- cgit v1.2.3 From 36a68e77c554f1ef1c206fd618e6daf82d3e38a3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 00:29:34 -0800 Subject: [SPARC64]: Simplify sun4v TLB handling using macros. There was also a bug in sun4v_itlb_miss, it loaded the MMU Fault Status base into %g3 instead of %g2. 
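In C terms, the TSB pointer computation that the new COMPUTE_TSB_PTR macro encodes is roughly the sketch below. This is for illustration only, not code from the tree: compute_tsb_ptr is a hypothetical name, and tsb_reg stands for the raw SCRATCHPAD_UTSBREG1 value, whose low three bits encode the TSB size.

	/* Sketch of what COMPUTE_TSB_PTR computes; PAGE_SHIFT as in the
	 * kernel headers.
	 */
	unsigned long compute_tsb_ptr(unsigned long tsb_reg, unsigned long vaddr)
	{
		/* Low 3 bits of the TSB register select the TSB size. */
		unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
		unsigned long tsb_base = tsb_reg & ~0x7UL;
		unsigned long tsb_index = (vaddr >> PAGE_SHIFT) & index_mask;

		return tsb_base + (tsb_index * 16UL); /* 16-byte TSB entries */
	}

The macro emits this same computation for the ITLB miss, DTLB miss, and common TSB miss paths instead of open-coding it in each one.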
This pointed out a fast path for TSB miss processing: since we have %g2 with the MMU Fault Status base, we can use that to quickly load up the PGD phys address.

Signed-off-by: David S. Miller
---
 arch/sparc64/kernel/sun4v_tlb_miss.S | 130 ++++++++++++++---------------------
 arch/sparc64/kernel/tsb.S | 18 +++--
 2 files changed, 61 insertions(+), 87 deletions(-)
(limited to 'arch')

diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index f6222623de3..f7129137f9a 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -6,48 +6,55 @@
 .text
 .align 32
-sun4v_itlb_miss:
- /* Load MMU Miss base into %g2. */
- ldxa [%g0] ASI_SCRATCHPAD, %g3
-
- /* Load UTSB reg into %g1. */
- mov SCRATCHPAD_UTSBREG1, %g1
- ldxa [%g1] ASI_SCRATCHPAD, %g1
+ /* Load ITLB fault information into VADDR and CTX, using BASE. */
+#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
+ ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
+ ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX;
+
+ /* Load DTLB fault information into VADDR and CTX, using BASE. */
+#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
+ ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
+ ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX;
- /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
- * Branch if kernel TLB miss. The kernel TSB and user TSB miss
- * code wants the missing virtual address in %g4, so that value
- * cannot be modified through the entirety of this handler.
+ /* DEST = (CTX << 48) | (VADDR >> 22)
+ *
+ * Branch to ZERO_CTX_LABEL if context is zero.
 */
+#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \
+ srlx VADDR, 22, TMP; \
+ sllx CTX, 48, DEST; \
+ brz,pn CTX, ZERO_CTX_LABEL; \
+ or DEST, TMP, DEST;
 /* Create TSB pointer. This is something like:
 *
 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
 * tsb_base = tsb_reg & ~0x7UL;
- */
- and %g1, 0x7, %g3
- andn %g1, 0x7, %g1
- mov 512, %g7
- sllx %g7, %g3, %g7
- sub %g7, 1, %g7
-
- /* TSB index mask is in %g7, tsb base is in %g1. Compute
- * the TSB entry pointer into %g1:
- *
 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
 * tsb_ptr = tsb_base + (tsb_index * 16);
 */
- srlx %g4, PAGE_SHIFT, %g3
- and %g3, %g7, %g3
- sllx %g3, 4, %g3
- add %g1, %g3, %g1
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+ and TSB_PTR, 0x7, TMP1; \
+ mov 512, TMP2; \
+ andn TSB_PTR, 0x7, TSB_PTR; \
+ sllx TMP2, TMP1, TMP2; \
+ srlx VADDR, PAGE_SHIFT, TMP1; \
+ sub TMP2, 1, TMP2; \
+ and TMP1, TMP2, TMP1; \
+ sllx TMP1, 4, TMP1; \
+ add TSB_PTR, TMP1, TSB_PTR;
+
+sun4v_itlb_miss:
+ /* Load MMU Miss base into %g2. */
+ ldxa [%g0] ASI_SCRATCHPAD, %g2
+
+ /* Load UTSB reg into %g1. */
+ mov SCRATCHPAD_UTSBREG1, %g1
+ ldxa [%g1] ASI_SCRATCHPAD, %g1
+
+ LOAD_ITLB_INFO(%g2, %g4, %g5)
+ COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v)
+ COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
 /* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 ldda [%g1] ASI_QUAD_LDD_PHYS, %g2
@@ -91,40 +98,9 @@ sun4v_dtlb_miss:
 mov SCRATCHPAD_UTSBREG1, %g1
 ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1
- /* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
- * Branch if kernel TLB miss. The kernel TSB and user TSB miss
- * code wants the missing virtual address in %g4, so that value
- * cannot be modified through the entirety of this handler.
- */ - ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 - ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 - srlx %g4, 22, %g3 - sllx %g5, 48, %g6 - or %g6, %g3, %g6 - brz,pn %g5, kvmap_dtlb_4v - nop - - /* Create TSB pointer. This is something like: - * - * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; - * tsb_base = tsb_reg & ~0x7UL; - */ - and %g1, 0x7, %g3 - andn %g1, 0x7, %g1 - mov 512, %g7 - sllx %g7, %g3, %g7 - sub %g7, 1, %g7 - - /* TSB index mask is in %g7, tsb base is in %g1. Compute - * the TSB entry pointer into %g1: - * - * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask); - * tsb_ptr = tsb_base + (tsb_index * 16); - */ - srlx %g4, PAGE_SHIFT, %g3 - and %g3, %g7, %g3 - sllx %g3, 4, %g3 - add %g1, %g3, %g1 + LOAD_DTLB_INFO(%g2, %g4, %g5) + COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v) + COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 @@ -169,7 +145,8 @@ sun4v_dtlb_prot: mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 /* Called from trap table with TAG TARGET placed into - * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. + * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and + * SCRATCHPAD_MMU_MISS contents in %g2. */ sun4v_itsb_miss: ba,pt %xcc, sun4v_tsb_miss_common @@ -189,16 +166,15 @@ sun4v_dtsb_miss: * tsb_ptr = tsb_base + (tsb_index * 16); */ sun4v_tsb_miss_common: - and %g1, 0x7, %g2 - andn %g1, 0x7, %g1 - mov 512, %g7 - sllx %g7, %g2, %g7 - sub %g7, 1, %g7 - srlx %g4, PAGE_SHIFT, %g2 - and %g2, %g7, %g2 - sllx %g2, 4, %g2 - ba,pt %xcc, tsb_miss_page_table_walk - add %g1, %g2, %g1 + COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7) + + /* Branch directly to page table lookup. We have SCRATCHPAD_MMU_MISS + * still in %g2, so it's quite trivial to get at the PGD PHYS value + * so we can preload it into %g7. + */ + sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 + ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath + ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 /* Instruction Access Exception, tl0. */ sun4v_iacc: diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 8a9351258af..667dcb077be 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -25,26 +25,24 @@ */ tsb_miss_dtlb: mov TLB_TAG_ACCESS, %g4 - ldxa [%g4] ASI_DMMU, %g4 ba,pt %xcc, tsb_miss_page_table_walk - nop + ldxa [%g4] ASI_DMMU, %g4 tsb_miss_itlb: mov TLB_TAG_ACCESS, %g4 - ldxa [%g4] ASI_IMMU, %g4 ba,pt %xcc, tsb_miss_page_table_walk - nop + ldxa [%g4] ASI_IMMU, %g4 - /* The sun4v TLB miss handlers jump directly here instead - * of tsb_miss_{d,i}tlb with registers setup as follows: - * - * %g4: missing virtual address - * %g1: TSB entry address loaded - * %g6: TAG TARGET ((vaddr >> 22) | (ctx << 48)) + /* At this point we have: + * %g4 -- missing virtual address + * %g1 -- TSB entry address + * %g6 -- TAG TARGET ((vaddr >> 22) | (ctx << 48)) */ tsb_miss_page_table_walk: TRAP_LOAD_PGD_PHYS(%g7, %g5) + /* And now we have the PGD base physical address in %g7. */ +tsb_miss_page_table_walk_sun4v_fastpath: USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) tsb_reload: -- cgit v1.2.3 From 4bdff41464c2954c6f62f849df0e73eb9fa21c65 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 01:01:55 -0800 Subject: [SPARC64]: Fetch bootup time of day from Hypervisor. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/time.c | 58 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index a22930d62ad..24681b9b442 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -683,6 +683,48 @@ static void __init set_system_time(void) } } +/* davem suggests we keep this within the 4M locked kernel image */ +static u32 starfire_get_time(void) +{ + static char obp_gettod[32]; + static u32 unix_tod; + + sprintf(obp_gettod, "h# %08x unix-gettod", + (unsigned int) (long) &unix_tod); + prom_feval(obp_gettod); + + return unix_tod; +} + +static u32 hypervisor_get_time(void) +{ + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + int retries = 10000; + +retry: + func = HV_FAST_TOD_GET; + arg0 = 0; + arg1 = 0; + __asm__ __volatile__("ta %6" + : "=&r" (func), "=&r" (arg0), "=&r" (arg1) + : "0" (func), "1" (arg0), "2" (arg1), + "i" (HV_FAST_TRAP)); + if (arg0 == HV_EOK) + return arg1; + if (arg0 == HV_EWOULDBLOCK) { + if (--retries > 0) { + udelay(100); + goto retry; + } + printk(KERN_WARNING "SUN4V: tod_get() timed out.\n"); + return 0; + } + printk(KERN_WARNING "SUN4V: tod_get() not supported.\n"); + return 0; +} + void __init clock_probe(void) { struct linux_prom_registers clk_reg[2]; @@ -702,14 +744,14 @@ void __init clock_probe(void) if (this_is_starfire) { - /* davem suggests we keep this within the 4M locked kernel image */ - static char obp_gettod[256]; - static u32 unix_tod; - - sprintf(obp_gettod, "h# %08x unix-gettod", - (unsigned int) (long) &unix_tod); - prom_feval(obp_gettod); - xtime.tv_sec = unix_tod; + xtime.tv_sec = starfire_get_time(); + xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); + set_normalized_timespec(&wall_to_monotonic, + -xtime.tv_sec, -xtime.tv_nsec); + return; + } + if (tlb_type == hypervisor) { + xtime.tv_sec = hypervisor_get_time(); xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); -- cgit v1.2.3 From e92b92571c85dfa1cdc56e88566134c51ae1d12b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 10:19:37 -0800 Subject: [SPARC64]: Handle hypervisor case correctly in copy_tsb(). Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 6ae2a5a702c..c5dc4b0cc1c 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -231,7 +231,13 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, register unsigned long pte asm("o5"); unsigned long v, hash; - if (tlb_type == cheetah_plus) { + if (tlb_type == hypervisor) { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (__pa(&old_tsb[i])), + "i" (ASI_QUAD_LDD_PHYS_4V)); + } else if (tlb_type == cheetah_plus) { __asm__ __volatile__( "ldda [%2] %3, %0" : "=r" (tag), "=r" (pte) @@ -267,7 +273,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, v |= (i & (512UL - 1UL)) << 13UL; hash = tsb_hash(v, new_nentries); - if (tlb_type == cheetah_plus) { + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "stxa %3, [%4] %2" -- cgit v1.2.3 From 0d4bc95b9c205a7374afbe93b38d9c2757a45862 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sat, 11 Feb 2006 10:30:41 -0800 Subject: [SPARC64]: Fix some Niagara memcpy() bugs. We need to restore the %asi register properly. For the kernel this means get_fs(), for user this means ASI_PNF. Also, NGcopy_to_user.S was including U3memcpy.S instead of NGmemcpy.S, oops :-) Signed-off-by: David S. Miller --- arch/sparc64/lib/NGcopy_to_user.S | 2 +- arch/sparc64/lib/NGmemcpy.S | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S index 4a12395b450..34112d5054e 100644 --- a/arch/sparc64/lib/NGcopy_to_user.S +++ b/arch/sparc64/lib/NGcopy_to_user.S @@ -37,4 +37,4 @@ nop #endif -#include "U3memcpy.S" +#include "NGmemcpy.S" diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S index a39aa3bd434..8e522b3dc09 100644 --- a/arch/sparc64/lib/NGmemcpy.S +++ b/arch/sparc64/lib/NGmemcpy.S @@ -5,11 +5,15 @@ #ifdef __KERNEL__ #include +#include #define GLOBAL_SPARE %g7 -#define RESTORE_ASI wr %g0, ASI_AIUS, %asi +#define RESTORE_ASI(TMP) \ + ldub [%g6 + TI_CURRENT_DS], TMP; \ + wr TMP, 0x0, %asi; #else #define GLOBAL_SPARE %g5 -#define RESTORE_ASI +#define RESTORE_ASI(TMP) \ + wr %g0, ASI_PNF, %asi #endif #ifndef STORE_ASI @@ -246,7 +250,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ /* %o2 contains any final bytes still needed to be copied * over. If anything is left, we copy it one byte at a time. */ - RESTORE_ASI + RESTORE_ASI(%o3) brz,pt %o2, 85f sub %o0, %o1, %o3 ba,a,pt %XCC, 90f -- cgit v1.2.3 From 6cebb52094baddd4c167bb5100a7cf6f7bee6910 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 10:56:43 -0800 Subject: [SPARC64]: Fix sun4v early bootup. prom_sun4v_name should be "sun4v" not "SUNW,sun4v" Also, this is too early to make use of the .sun4v_Xinsn_patch code patching, so just check things manually. This gets us at least to prom_init() on Niagara. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/head.S | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index f581f0e917f..3222c8205b5 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -123,7 +123,7 @@ prom_map_name: prom_unmap_name: .asciz "unmap" prom_sun4v_name: - .asciz "SUNW,sun4v" + .asciz "sun4v" .align 4 prom_root_compatible: .skip 64 @@ -300,7 +300,7 @@ sparc64_boot_after_remap: or %g1, %lo(prom_root_compatible), %g1 sethi %hi(prom_sun4v_name), %g7 or %g7, %lo(prom_sun4v_name), %g7 - mov 10, %g3 + mov 5, %g3 1: ldub [%g7], %g2 ldub [%g1], %g4 cmp %g2, %g4 @@ -380,28 +380,33 @@ jump_to_sun4u_init: nop sun4u_init: + BRANCH_IF_SUN4V(g1, sun4v_init) + /* Set ctx 0 */ mov PRIMARY_CONTEXT, %g7 - -661: stxa %g0, [%g7] ASI_DMMU - .section .sun4v_1insn_patch, "ax" - .word 661b - stxa %g0, [%g7] ASI_MMU - .previous - + stxa %g0, [%g7] ASI_DMMU membar #Sync mov SECONDARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_DMMU + membar #Sync -661: stxa %g0, [%g7] ASI_DMMU - .section .sun4v_1insn_patch, "ax" - .word 661b + ba,pt %xcc, sun4u_continue + nop + +sun4v_init: + /* Set ctx 0 */ + mov PRIMARY_CONTEXT, %g7 stxa %g0, [%g7] ASI_MMU - .previous + membar #Sync - membar #Sync + mov SECONDARY_CONTEXT, %g7 + stxa %g0, [%g7] ASI_MMU + membar #Sync + ba,pt %xcc, niagara_tlb_fixup + nop - BRANCH_IF_SUN4V(g1, niagara_tlb_fixup) +sun4u_continue: BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) ba,pt %xcc, spitfire_tlb_fixup -- cgit v1.2.3 From fd05068d7b22b64211f9202aa67ad44b51d44242 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 11:05:52 -0800 Subject: [SPARC64]: Fix typo in sun4v_patch(). Second instruction offset is '4' not '3'. Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index cec921f6cdb..f36b257b2e4 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -583,7 +583,7 @@ static void __init sun4v_patch(void) wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - *(unsigned int *) (addr + 3) = p2->insns[1]; + *(unsigned int *) (addr + 4) = p2->insns[1]; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 4)); -- cgit v1.2.3 From 459b6e621e0e15315c25bac47fa7113e5818d45d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 12:21:20 -0800 Subject: [SPARC64]: Fix some SUN4V TLB miss bugs. Code patching did not sign extend negative branch offsets correctly. Kernel TLB miss path needs patching and %g4 register preservation in order to handle SUN4V correctly. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 67 +++++++++++++++++++++++++++++++----- arch/sparc64/kernel/sun4v_tlb_miss.S | 14 +++++--- 2 files changed, 68 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index f6bb2e08964..2d333ab4b91 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -48,7 +48,7 @@ kvmap_itlb_tsb_miss: kvmap_itlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 @@ -60,8 +60,29 @@ kvmap_itlb_vmalloc_addr: /* fallthrough to TLB load */ kvmap_itlb_load: - stxa %g5, [%g0] ASI_ITLB_DATA_IN ! 
Reload TLB + +661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_ITLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_itlb_load + mov %g5, %g3 kvmap_itlb_longpath: @@ -80,7 +101,7 @@ kvmap_itlb_longpath: kvmap_itlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g7) KTSB_WRITE(%g1, %g5, %g6) @@ -90,7 +111,7 @@ kvmap_itlb_obp: kvmap_dtlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g7) KTSB_WRITE(%g1, %g5, %g6) @@ -129,7 +150,7 @@ kvmap_linear_patch: kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g4) + KTSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 @@ -141,8 +162,29 @@ kvmap_dtlb_vmalloc_addr: /* fallthrough to TLB load */ kvmap_dtlb_load: - stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB + +661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB retry + .section .sun4v_2insn_patch, "ax" + .word 661b + nop + nop + .previous + + /* For sun4v the ASI_DTLB_DATA_IN store and the retry + * instruction get nop'd out and we get here to branch + * to the sun4v tlb load code. The registers are setup + * as follows: + * + * %g4: vaddr + * %g5: PTE + * %g6: TAG + * + * The sun4v TLB load wants the PTE in %g3 so we fix that + * up here. + */ + ba,pt %xcc, sun4v_dtlb_load + mov %g5, %g3 kvmap_dtlb_nonlinear: /* Catch kernel NULL pointer derefs. */ @@ -185,10 +227,17 @@ kvmap_dtlb_longpath: nop .previous - rdpr %tl, %g4 - cmp %g4, 1 - mov TLB_TAG_ACCESS, %g4 + rdpr %tl, %g3 + cmp %g3, 1 + +661: mov TLB_TAG_ACCESS, %g4 ldxa [%g4] ASI_DMMU, %g5 + .section .sun4v_2insn_patch, "ax" + .word 661b + mov %g4, %g5 + nop + .previous + be,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB, %g4 ba,pt %xcc, winfix_trampoline diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index f7129137f9a..597359ced23 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -96,7 +96,7 @@ sun4v_dtlb_miss: /* Load UTSB reg into %g1. */ mov SCRATCHPAD_UTSBREG1, %g1 - ldxa [%g1 + %g1] ASI_SCRATCHPAD, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 LOAD_DTLB_INFO(%g2, %g4, %g5) COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v) @@ -149,14 +149,19 @@ sun4v_dtlb_prot: * SCRATCHPAD_MMU_MISS contents in %g2. */ sun4v_itsb_miss: - ba,pt %xcc, sun4v_tsb_miss_common + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + brz,pn %g5, kvmap_itlb_4v mov FAULT_CODE_ITLB, %g3 /* Called from trap table with TAG TARGET placed into * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. */ sun4v_dtsb_miss: - mov FAULT_CODE_DTLB, %g3 + mov SCRATCHPAD_UTSBREG1, %g1 + ldxa [%g1] ASI_SCRATCHPAD, %g1 + brz,pn %g5, kvmap_dtlb_4v + mov FAULT_CODE_DTLB, %g3 /* Create TSB pointer into %g1. 
This is something like: * @@ -312,7 +317,8 @@ sun4v_stdfmna: or %g2, %lo(OLD), %g2; \ sub %g1, %g2, %g1; \ sethi %hi(BRANCH_ALWAYS), %g3; \ - srl %g1, 2, %g1; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ or %g3, %lo(BRANCH_ALWAYS), %g3; \ or %g3, %g1, %g3; \ stw %g3, [%g2]; \ -- cgit v1.2.3 From 490384e752a43aa281ed533e9de2da36df25c337 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 14:41:18 -0800 Subject: [SPARC64]: Register kernel TSB with hypervisor. We do this right after we take over the trap table from OBP. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 4 ++- arch/sparc64/mm/init.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index f553264588d..7d7e02ba297 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -122,8 +122,10 @@ void __init smp_callin(void) __local_per_cpu_offset = __per_cpu_offset(cpuid); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_register_fault_status(); + sun4v_ktsb_register(); + } __flush_tlb_all(); diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 88eb6f6be56..92756da273b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,6 +1109,69 @@ static void __init tsb_phys_patch(void) } } +/* Don't mark as init, we give this to the Hypervisor. */ +static struct hv_tsb_descr ktsb_descr[2]; +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static void __init sun4v_ktsb_init(void) +{ + unsigned long ktsb_pa; + + ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); + + switch (PAGE_SIZE) { + case 8 * 1024: + default: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; + break; + + case 64 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; + break; + + case 512 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; + break; + + case 4 * 1024 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; + break; + }; + + ktsb_descr[0].assoc = 0; + ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; + ktsb_descr[0].ctx_idx = 0; + ktsb_descr[0].tsb_base = ktsb_pa; + ktsb_descr[0].resv = 0; + + /* XXX When we have a kernel large page size TSB, describe + * XXX it in ktsb_descr[1] here. + */ +} + +void __cpuinit sun4v_ktsb_register(void) +{ + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + unsigned long pa; + + pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); + + func = HV_FAST_MMU_TSB_CTX0; + /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ + arg0 = 1; + arg1 = pa; + __asm__ __volatile__("ta %6" + : "=&r" (func), "=&r" (arg0), "=&r" (arg1) + : "0" (func), "1" (arg0), "2" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1129,8 +1192,10 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_ktsb_init(); + } /* Find available physical memory... 
*/ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1171,6 +1236,9 @@ void __init paging_init(void) __flush_tlb_all(); + if (tlb_type == hypervisor) + sun4v_ktsb_register(); + /* Setup bootmem... */ pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -- cgit v1.2.3 From c4bce90ea2069e5a87beac806de3090ab32128d5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 21:57:54 -0800 Subject: [SPARC64]: Deal with PTE layout differences in SUN4V. Yes, you heard it right, they changed the PTE layout for SUN4V. Ho hum... This is the simple and inefficient way to support this. It'll get optimized, don't worry. Signed-off-by: David S. Miller --- arch/sparc64/kernel/itlb_miss.S | 4 +- arch/sparc64/kernel/ktlb.S | 12 +- arch/sparc64/kernel/setup.c | 274 -------------- arch/sparc64/kernel/sun4v_tlb_miss.S | 3 +- arch/sparc64/kernel/tsb.S | 9 +- arch/sparc64/lib/clear_page.S | 8 +- arch/sparc64/lib/copy_page.S | 7 +- arch/sparc64/mm/fault.c | 2 +- arch/sparc64/mm/generic.c | 40 +- arch/sparc64/mm/init.c | 703 ++++++++++++++++++++++++++++++----- arch/sparc64/mm/tsb.c | 12 +- 11 files changed, 645 insertions(+), 429 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S index 97facce27aa..730caa4a150 100644 --- a/arch/sparc64/kernel/itlb_miss.S +++ b/arch/sparc64/kernel/itlb_miss.S @@ -6,9 +6,10 @@ nop ! Delay slot (fill me) TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry cmp %g4, %g6 ! Compare TAG - sethi %hi(_PAGE_EXEC), %g4 ! Setup exec check + sethi %hi(PAGE_EXEC), %g4 ! Setup exec check /* ITLB ** ICACHE line 2: TSB compare and TLB load */ + ldx [%g4 + %lo(PAGE_EXEC)], %g4 bne,pn %xcc, tsb_miss_itlb ! Miss mov FAULT_CODE_ITLB, %g3 andcc %g5, %g4, %g0 ! Executable? @@ -16,7 +17,6 @@ nop ! Delay slot, fill me stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB retry ! Trap done - nop /* ITLB ** ICACHE line 3: */ nop diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 2d333ab4b91..47dfd45971e 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -131,16 +131,8 @@ kvmap_dtlb_4v: brgez,pn %g4, kvmap_dtlb_nonlinear nop -#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) -#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - - sethi %uhi(KERN_HIGHBITS), %g2 - or %g2, %ulo(KERN_HIGHBITS), %g2 - sllx %g2, 32, %g2 - or %g2, KERN_LOWBITS, %g2 - -#undef KERN_HIGHBITS -#undef KERN_LOWBITS + sethi %hi(kern_linear_pte_xor), %g2 + ldx [%g2 + %lo(kern_linear_pte_xor)], %g2 .globl kvmap_linear_patch kvmap_linear_patch: diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index f36b257b2e4..ca75f3b26a3 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -64,12 +64,6 @@ struct screen_info screen_info = { 16 /* orig-video-points */ }; -/* Typing sync at the prom prompt calls the function pointed to by - * the sync callback which I set to the following function. - * This should sync all filesystems and return, for now it just - * prints out pretty messages and returns. - */ - void (*prom_palette)(int); void (*prom_keyboard)(void); @@ -79,263 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n) prom_write(s, n); } -static struct console prom_console = { - .name = "prom", - .write = prom_console_write, - .flags = CON_CONSDEV | CON_ENABLED, - .index = -1, -}; - -#define PROM_TRUE -1 -#define PROM_FALSE 0 - -/* Pretty sick eh? 
*/ -int prom_callback(long *args) -{ - struct console *cons, *saved_console = NULL; - unsigned long flags; - char *cmd; - extern spinlock_t prom_entry_lock; - - if (!args) - return -1; - if (!(cmd = (char *)args[0])) - return -1; - - /* - * The callback can be invoked on the cpu that first dropped - * into prom_cmdline after taking the serial interrupt, or on - * a slave processor that was smp_captured() if the - * administrator has done a switch-cpu inside obp. In either - * case, the cpu is marked as in-interrupt. Drop IRQ locks. - */ - irq_exit(); - - /* XXX Revisit the locking here someday. This is a debugging - * XXX feature so it isnt all that critical. -DaveM - */ - local_irq_save(flags); - - spin_unlock(&prom_entry_lock); - cons = console_drivers; - while (cons) { - unregister_console(cons); - cons->flags &= ~(CON_PRINTBUFFER); - cons->next = saved_console; - saved_console = cons; - cons = console_drivers; - } - register_console(&prom_console); - if (!strcmp(cmd, "sync")) { - prom_printf("PROM `%s' command...\n", cmd); - show_free_areas(); - if (current->pid != 0) { - local_irq_enable(); - sys_sync(); - local_irq_disable(); - } - args[2] = 0; - args[args[1] + 3] = -1; - prom_printf("Returning to PROM\n"); - } else if (!strcmp(cmd, "va>tte-data")) { - unsigned long ctx, va; - unsigned long tte = 0; - long res = PROM_FALSE; - - ctx = args[3]; - va = args[4]; - if (ctx) { - /* - * Find process owning ctx, lookup mapping. - */ - struct task_struct *p; - struct mm_struct *mm = NULL; - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - pte_t pte; - - for_each_process(p) { - mm = p->mm; - if (CTX_NRBITS(mm->context) == ctx) - break; - } - if (!mm || - CTX_NRBITS(mm->context) != ctx) - goto done; - - pgdp = pgd_offset(mm, va); - if (pgd_none(*pgdp)) - goto done; - pudp = pud_offset(pgdp, va); - if (pud_none(*pudp)) - goto done; - pmdp = pmd_offset(pudp, va); - if (pmd_none(*pmdp)) - goto done; - - /* Preemption implicitly disabled by virtue of - * being called from inside OBP. - */ - ptep = pte_offset_map(pmdp, va); - pte = *ptep; - if (pte_present(pte)) { - tte = pte_val(pte); - res = PROM_TRUE; - } - pte_unmap(ptep); - goto done; - } - - if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { - if (tlb_type == spitfire) { - extern unsigned long sparc64_kern_pri_context; - - /* Spitfire Errata #32 workaround */ - __asm__ __volatile__( - "stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (sparc64_kern_pri_context), - "r" (PRIMARY_CONTEXT), - "i" (ASI_DMMU)); - } - - /* - * Locked down tlb entry. - */ - - if (tlb_type == spitfire) { - tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT); - res = PROM_TRUE; - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT); - res = PROM_TRUE; - } - goto done; - } - - if (va < PGDIR_SIZE) { - /* - * vmalloc or prom_inherited mapping. - */ - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - pte_t pte; - int error; - - if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) { - tte = prom_virt_to_phys(va, &error); - if (!error) - res = PROM_TRUE; - goto done; - } - pgdp = pgd_offset_k(va); - if (pgd_none(*pgdp)) - goto done; - pudp = pud_offset(pgdp, va); - if (pud_none(*pudp)) - goto done; - pmdp = pmd_offset(pudp, va); - if (pmd_none(*pmdp)) - goto done; - - /* Preemption implicitly disabled by virtue of - * being called from inside OBP. 
- */ - ptep = pte_offset_kernel(pmdp, va); - pte = *ptep; - if (pte_present(pte)) { - tte = pte_val(pte); - res = PROM_TRUE; - } - goto done; - } - - if (va < PAGE_OFFSET) { - /* - * No mappings here. - */ - goto done; - } - - if (va & (1UL << 40)) { - /* - * I/O page. - */ - - tte = (__pa(va) & _PAGE_PADDR) | - _PAGE_VALID | _PAGE_SZ4MB | - _PAGE_E | _PAGE_P | _PAGE_W; - res = PROM_TRUE; - goto done; - } - - /* - * Normal page. - */ - tte = (__pa(va) & _PAGE_PADDR) | - _PAGE_VALID | _PAGE_SZ4MB | - _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W; - res = PROM_TRUE; - - done: - if (res == PROM_TRUE) { - args[2] = 3; - args[args[1] + 3] = 0; - args[args[1] + 4] = res; - args[args[1] + 5] = tte; - } else { - args[2] = 2; - args[args[1] + 3] = 0; - args[args[1] + 4] = res; - } - } else if (!strcmp(cmd, ".soft1")) { - unsigned long tte; - - tte = args[3]; - prom_printf("%lx:\"%s%s%s%s%s\" ", - (tte & _PAGE_SOFT) >> 7, - tte & _PAGE_MODIFIED ? "M" : "-", - tte & _PAGE_ACCESSED ? "A" : "-", - tte & _PAGE_READ ? "W" : "-", - tte & _PAGE_WRITE ? "R" : "-", - tte & _PAGE_PRESENT ? "P" : "-"); - - args[2] = 2; - args[args[1] + 3] = 0; - args[args[1] + 4] = PROM_TRUE; - } else if (!strcmp(cmd, ".soft2")) { - unsigned long tte; - - tte = args[3]; - prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50); - - args[2] = 2; - args[args[1] + 3] = 0; - args[args[1] + 4] = PROM_TRUE; - } else { - prom_printf("unknown PROM `%s' command...\n", cmd); - } - unregister_console(&prom_console); - while (saved_console) { - cons = saved_console; - saved_console = cons->next; - register_console(cons); - } - spin_lock(&prom_entry_lock); - local_irq_restore(flags); - - /* - * Restore in-interrupt status for a resume from obp. - */ - irq_enter(); - return 0; -} - unsigned int boot_flags = 0; #define BOOTME_DEBUG 0x1 #define BOOTME_SINGLE 0x2 @@ -483,17 +220,6 @@ char reboot_command[COMMAND_LINE_SIZE]; static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; -void register_prom_callbacks(void) -{ - prom_setcallback(prom_callback); - prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; " - "' linux-va>tte-data to va>tte-data"); - prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; " - "' linux-.soft1 to .soft1"); - prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; " - "' linux-.soft2 to .soft2"); -} - static void __init per_cpu_patch(void) { #ifdef CONFIG_SMP diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 597359ced23..950ca74b4a5 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -59,7 +59,8 @@ sun4v_itlb_miss: /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 cmp %g2, %g6 - sethi %hi(_PAGE_EXEC), %g7 + sethi %hi(PAGE_EXEC), %g7 + ldx [%g7 + %lo(PAGE_EXEC)], %g7 bne,a,pn %xcc, tsb_miss_page_table_walk mov FAULT_CODE_ITLB, %g3 andcc %g3, %g7, %g0 diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 667dcb077be..be8f0892d72 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -56,10 +56,11 @@ tsb_reload: /* If it is larger than the base page size, don't * bother putting it into the TSB. 
*/ - srlx %g5, 32, %g2 - sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g7 - and %g2, %g7, %g2 - sethi %hi(_PAGE_SZBITS >> 32), %g7 + sethi %hi(_PAGE_ALL_SZ_BITS), %g7 + ldx [%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7 + and %g5, %g7, %g2 + sethi %hi(_PAGE_SZBITS), %g7 + ldx [%g7 + %lo(_PAGE_SZBITS)], %g7 cmp %g2, %g7 bne,a,pn %xcc, tsb_tlb_reload TSB_STORE(%g1, %g0) diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S index cdc634bceba..77e531f6c2a 100644 --- a/arch/sparc64/lib/clear_page.S +++ b/arch/sparc64/lib/clear_page.S @@ -23,9 +23,6 @@ * disable preemption during the clear. */ -#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) -#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) - .text .globl _clear_page @@ -44,12 +41,11 @@ clear_user_page: /* %o0=dest, %o1=vaddr */ sethi %hi(PAGE_SIZE), %o4 sllx %g2, 32, %g2 - sethi %uhi(TTE_BITS_TOP), %g3 + sethi %hi(PAGE_KERNEL_LOCKED), %g3 - sllx %g3, 32, %g3 + ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 sub %o0, %g2, %g1 ! paddr - or %g3, TTE_BITS_BOTTOM, %g3 and %o1, %o4, %o0 ! vaddr D-cache alias bit or %g1, %g3, %g1 ! TTE data diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S index feebb14fd27..37460666a5c 100644 --- a/arch/sparc64/lib/copy_page.S +++ b/arch/sparc64/lib/copy_page.S @@ -23,8 +23,6 @@ * disable preemption during the clear. */ -#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) -#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) #define DCACHE_SIZE (PAGE_SIZE * 2) #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) @@ -52,13 +50,12 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ sethi %hi(PAGE_SIZE), %o3 sllx %g2, 32, %g2 - sethi %uhi(TTE_BITS_TOP), %g3 + sethi %hi(PAGE_KERNEL_LOCKED), %g3 - sllx %g3, 32, %g3 + ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 sub %o0, %g2, %g1 ! dest paddr sub %o1, %g2, %g2 ! src paddr - or %g3, TTE_BITS_BOTTOM, %g3 and %o2, %o3, %o0 ! vaddr D-cache alias bit or %g1, %g3, %g1 ! dest TTE data diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 6f0539aa44d..439a53c1e56 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -137,7 +137,7 @@ static unsigned int get_user_insn(unsigned long tpc) if (!pte_present(pte)) goto out; - pa = (pte_val(pte) & _PAGE_PADDR); + pa = (pte_pfn(pte) << PAGE_SHIFT); pa += (tpc & ~PAGE_MASK); /* Use phys bypass so we don't pollute dtlb/dcache. */ diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 580b63da836..5fc5c579e35 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c @@ -15,15 +15,6 @@ #include #include -static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) -{ - pte_t pte; - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) & - ~(unsigned long)_PAGE_CACHE); - pte_val(pte) |= (((unsigned long)space) << 32); - return pte; -} - /* Remap IO memory, the same way as remap_pfn_range(), but use * the obio memory space. 
* @@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, pte_t entry; unsigned long curend = address + PAGE_SIZE; - entry = mk_pte_io(offset, prot, space); + entry = mk_pte_io(offset, prot, space, PAGE_SIZE); if (!(address & 0xffff)) { - if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), - space); + if (PAGE_SIZE < (4 * 1024 * 1024) && + !(address & 0x3fffff) && + !(offset & 0x3ffffe) && + end >= address + 0x400000) { + entry = mk_pte_io(offset, prot, space, + 4 * 1024 * 1024); curend = address + 0x400000; offset += 0x400000; - } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ512K), - space); + } else if (PAGE_SIZE < (512 * 1024) && + !(address & 0x7ffff) && + !(offset & 0x7fffe) && + end >= address + 0x80000) { + entry = mk_pte_io(offset, prot, space, + 512 * 1024 * 1024); curend = address + 0x80000; offset += 0x80000; - } else if (!(offset & 0xfffe) && end >= address + 0x10000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ64K), - space); + } else if (PAGE_SIZE < (64 * 1024) && + !(offset & 0xfffe) && + end >= address + 0x10000) { + entry = mk_pte_io(offset, prot, space, + 64 * 1024); curend = address + 0x10000; offset += 0x10000; } else diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 92756da273b..9c2fc239f3e 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; +unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long __tsb_insert(tsb_addr, tag, pte); } +unsigned long _PAGE_ALL_SZ_BITS __read_mostly; +unsigned long _PAGE_SZBITS __read_mostly; + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -398,39 +403,9 @@ struct linux_prom_translation { struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -extern unsigned long prom_boot_page; -extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); -extern int prom_get_mmu_ihandle(void); -extern void register_prom_callbacks(void); - /* Exported for SMP bootup purposes. */ unsigned long kern_locked_tte_data; -/* - * Translate PROM's mapping we capture at boot time into physical address. - * The second parameter is only set from prom_callback() invocations. - */ -unsigned long prom_virt_to_phys(unsigned long promva, int *error) -{ - int i; - - for (i = 0; i < prom_trans_ents; i++) { - struct linux_prom_translation *p = &prom_trans[i]; - - if (promva >= p->virt && - promva < (p->virt + p->size)) { - unsigned long base = p->data & _PAGE_PADDR; - - if (error) - *error = 0; - return base + (promva & (8192 - 1)); - } - } - if (error) - *error = 1; - return 0UL; -} - /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> * HI_OBP_ADDRESS range are handled in ktlb.S. 
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "3" (arg2), "4" (arg3)); } +static unsigned long kern_large_tte(unsigned long paddr); + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -544,9 +521,7 @@ static void __init remap_kernel(void) tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | - _PAGE_CP | _PAGE_CV | _PAGE_P | - _PAGE_L | _PAGE_W)); + tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; @@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void) prom_printf("Remapping the kernel... "); remap_kernel(); prom_printf("done.\n"); - - prom_printf("Registering callbacks... "); - register_prom_callbacks(); - prom_printf("done.\n"); } void prom_world(int enter) @@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) } #endif /* DCACHE_ALIASING_POSSIBLE */ -/* If not locked, zap it. */ -void __flush_tlb_all(void) -{ - unsigned long pstate; - int i; - - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - if (tlb_type == spitfire) { - for (i = 0; i < 64; i++) { - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - cheetah_flush_dtlb_all(); - cheetah_flush_itlb_all(); - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. * @@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; +static void sun4u_pgprot_init(void); +static void sun4v_pgprot_init(void); + void __init paging_init(void) { unsigned long end_pfn, pages_avail, shift; @@ -1188,6 +1105,11 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == hypervisor) + sun4v_pgprot_init(); + else + sun4u_pgprot_init(); + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); @@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end) } } #endif + +/* SUN4U pte bits... 
*/ +#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ +#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ +#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ +#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ +#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ +#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ +#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ +#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ +#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ +#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ +#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ +#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ +#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ +#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ +#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ +#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ +#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ +#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ +#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ +#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ +#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ +#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ +#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ +#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ +#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ +#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ +#define _PAGE_W_4U 0x0000000000000002 /* Writable */ + +/* SUN4V pte bits... */ +#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ +#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ +#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ +#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ +#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ +#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ +#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ +#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ +#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ +#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ +#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ +#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ +#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ +#define _PAGE_W_4V 0x0000000000000040 /* Writable */ +#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ +#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ +#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ +#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ +#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ +#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ +#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ +#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ +#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ +#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ +#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ +#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ + +#if PAGE_SHIFT == 13 +#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V +#elif PAGE_SHIFT == 16 +#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V +#elif 
PAGE_SHIFT == 19 +#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V +#elif PAGE_SHIFT == 22 +#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U +#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V +#else +#error Wrong PAGE_SHIFT specified +#endif + +#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V +#endif + +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + +pgprot_t PAGE_KERNEL __read_mostly; +EXPORT_SYMBOL(PAGE_KERNEL); + +pgprot_t PAGE_KERNEL_LOCKED __read_mostly; +pgprot_t PAGE_COPY __read_mostly; +pgprot_t PAGE_EXEC __read_mostly; +unsigned long pg_iobits __read_mostly; + +unsigned long _PAGE_IE __read_mostly; +unsigned long _PAGE_E __read_mostly; +unsigned long _PAGE_CACHE __read_mostly; + +static void prot_init_common(unsigned long page_none, + unsigned long page_shared, + unsigned long page_copy, + unsigned long page_readonly, + unsigned long page_exec_bit) +{ + PAGE_COPY = __pgprot(page_copy); + + protection_map[0x0] = __pgprot(page_none); + protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x4] = __pgprot(page_readonly); + protection_map[0x5] = __pgprot(page_readonly); + protection_map[0x6] = __pgprot(page_copy); + protection_map[0x7] = __pgprot(page_copy); + protection_map[0x8] = __pgprot(page_none); + protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xc] = __pgprot(page_readonly); + protection_map[0xd] = __pgprot(page_readonly); + protection_map[0xe] = __pgprot(page_shared); + protection_map[0xf] = __pgprot(page_shared); +} + +static void __init sun4u_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U); + PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U | _PAGE_L_4U); + PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); + + _PAGE_IE = _PAGE_IE_4U; + _PAGE_E = _PAGE_E_4U; + _PAGE_CACHE = _PAGE_CACHE_4U; + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | + __ACCESS_BITS_4U | _PAGE_E_4U); + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + _PAGE_SZBITS = _PAGE_SZBITS_4U; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | + _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | + _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); + + + page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; + 
page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + + page_exec_bit = _PAGE_EXEC_4U; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +static void __init sun4v_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | + _PAGE_CACHE_4V | _PAGE_P_4V | + __ACCESS_BITS_4V | __DIRTY_BITS_4V | + _PAGE_EXEC_4V); + PAGE_KERNEL_LOCKED = PAGE_KERNEL; + PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); + + _PAGE_IE = _PAGE_IE_4V; + _PAGE_E = _PAGE_E_4V; + _PAGE_CACHE = _PAGE_CACHE_4V; + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | + __ACCESS_BITS_4V | _PAGE_E_4V); + + _PAGE_SZBITS = _PAGE_SZBITS_4V; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | + _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | + _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | + _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); + + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + + page_exec_bit = _PAGE_EXEC_4V; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +unsigned long pte_sz_bits(unsigned long sz) +{ + if (tlb_type == hypervisor) { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4V; + case 64 * 1024: + return _PAGE_SZ64K_4V; + case 512 * 1024: + return _PAGE_SZ512K_4V; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4V; + }; + } else { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4U; + case 64 * 1024: + return _PAGE_SZ64K_4U; + case 512 * 1024: + return _PAGE_SZ512K_4U; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4U; + }; + } +} + +pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) +{ + pte_t pte; + if (tlb_type == hypervisor) { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & + ~(unsigned long)_PAGE_CACHE_4V); + } else { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & + ~(unsigned long)_PAGE_CACHE_4U); + } + pte_val(pte) |= (((unsigned long)space) << 32); + pte_val(pte) |= pte_sz_bits(page_size); + return pte; +} + +unsigned long pte_present(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); +} + +unsigned long pte_file(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_FILE_4V : _PAGE_FILE_4U)); +} + +unsigned long pte_read(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_READ_4V : _PAGE_READ_4U)); +} + +unsigned long pte_exec(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_EXEC_4V : _PAGE_EXEC_4U)); +} + +unsigned long pte_write(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? 
+ _PAGE_WRITE_4V : _PAGE_WRITE_4U)); +} + +unsigned long pte_dirty(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); +} + +unsigned long pte_young(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); +} + +pte_t pte_wrprotect(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_rdprotect(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_READ_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_READ_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkclean(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkold(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkyoung(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkwrite(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkdirty(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkhuge(pte_t pte) +{ + unsigned long mask = _PAGE_SZHUGE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_SZHUGE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pgoff_to_pte(unsigned long off) +{ + unsigned long bit = _PAGE_FILE_4U; + + if (tlb_type == hypervisor) + bit = _PAGE_FILE_4V; + + return __pte((off << PAGE_SHIFT) | bit); +} + +pgprot_t pgprot_noncached(pgprot_t prot) +{ + unsigned long val = pgprot_val(prot); + unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; + unsigned long on = _PAGE_E_4U; + + if (tlb_type == hypervisor) { + off = _PAGE_CP_4V | _PAGE_CV_4V; + on = _PAGE_E_4V; + } + + return __pgprot((val & ~off) | on); +} + +pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + unsigned long sz_bits = _PAGE_SZBITS_4U; + + if (tlb_type == hypervisor) + sz_bits = _PAGE_SZBITS_4V; + + return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); +} + +unsigned long pte_pfn(pte_t pte) +{ + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + return (pte_val(pte) & mask) >> PAGE_SHIFT; +} + +pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) +{ + unsigned long preserve_mask; + unsigned long val; + + preserve_mask = (_PAGE_PADDR_4U | + _PAGE_MODIFIED_4U | + _PAGE_ACCESSED_4U | + _PAGE_CP_4U | + _PAGE_CV_4U | + _PAGE_E_4U | + _PAGE_PRESENT_4U | + _PAGE_SZBITS_4U); + if (tlb_type == hypervisor) + preserve_mask = (_PAGE_PADDR_4V | + _PAGE_MODIFIED_4V | + _PAGE_ACCESSED_4V | + _PAGE_CP_4V | + _PAGE_CV_4V | + _PAGE_E_4V | + _PAGE_PRESENT_4V | + _PAGE_SZBITS_4V); + + val = (pte_val(orig_pte) & preserve_mask); + + return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); +} + +static unsigned long kern_large_tte(unsigned long paddr) +{ + unsigned long val; + + val = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | + 
_PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); + if (tlb_type == hypervisor) + val = (_PAGE_VALID | _PAGE_SZ4MB_4V | + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + _PAGE_EXEC_4V | _PAGE_W_4V); + + return val | paddr; +} + +/* + * Translate PROM's mapping we capture at boot time into physical address. + * The second parameter is only set from prom_callback() invocations. + */ +unsigned long prom_virt_to_phys(unsigned long promva, int *error) +{ + unsigned long mask; + int i; + + mask = _PAGE_PADDR_4U; + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + for (i = 0; i < prom_trans_ents; i++) { + struct linux_prom_translation *p = &prom_trans[i]; + + if (promva >= p->virt && + promva < (p->virt + p->size)) { + unsigned long base = p->data & mask; + + if (error) + *error = 0; + return base + (promva & (8192 - 1)); + } + } + if (error) + *error = 1; + return 0UL; +} + +/* XXX We should kill off this ugly thing at some point. XXX */ +unsigned long sun4u_get_pte(unsigned long addr) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + if (addr >= PAGE_OFFSET) + return addr & mask; + + if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) + return prom_virt_to_phys(addr, NULL); + + pgdp = pgd_offset_k(addr); + pudp = pud_offset(pgdp, addr); + pmdp = pmd_offset(pudp, addr); + ptep = pte_offset_kernel(pmdp, addr); + + return pte_val(*ptep) & mask; +} + +/* If not locked, zap it. */ +void __flush_tlb_all(void) +{ + unsigned long pstate; + int i; + + __asm__ __volatile__("flushw\n\t" + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + if (tlb_type == spitfire) { + for (i = 0; i < 64; i++) { + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); + spitfire_put_dtlb_data(i, 0x0UL); + } + + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. 
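	 * As a rough sketch (assuming the asm/spitfire.h helpers of this
	 * era), the probe below reduces to one diagnostic load per entry:
	 *
	 *	ldxa	[entry << 3] ASI_DTLB_DATA_ACCESS, data
	 *
	 * which is why the errata store/flush must precede each access.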
+ */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); + spitfire_put_itlb_data(i, 0x0UL); + } + } + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { + cheetah_flush_dtlb_all(); + cheetah_flush_itlb_all(); + } + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (pstate)); +} diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index c5dc4b0cc1c..975242ab88e 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -85,8 +85,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); base = TSBMAP_BASE; - tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | - _PAGE_CV | _PAGE_P | _PAGE_W); + tte = pgprot_val(PAGE_KERNEL_LOCKED); tsb_paddr = __pa(mm->context.tsb); BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); @@ -99,55 +98,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) #ifdef DCACHE_ALIASING_POSSIBLE base += (tsb_paddr & 8192); #endif - tte |= _PAGE_SZ8K; page_sz = 8192; break; case 8192 << 1: tsb_reg = 0x1UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 2: tsb_reg = 0x2UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 3: tsb_reg = 0x3UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 4: tsb_reg = 0x4UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 5: tsb_reg = 0x5UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 6: tsb_reg = 0x6UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 7: tsb_reg = 0x7UL; - tte |= _PAGE_SZ4MB; page_sz = 4 * 1024 * 1024; break; default: BUG(); }; + tte |= pte_sz_bits(page_sz); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { /* Physical mapping, no locked TLB entry for TSB. */ -- cgit v1.2.3 From b5a37e96b8dc067b979e44c4e109c9bc49c2f4d8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:07:13 -0800 Subject: [SPARC64]: Fix mondo queue allocations. We have to use bootmem during init_IRQ and page alloc for sibling cpu calls. Also, fix incorrect hypervisor call return value checks in the hypervisor SMP cpu mondo send code. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/irq.c | 83 +++++++++++++++++++++++++++------------- arch/sparc64/kernel/smp.c | 4 +- arch/sparc64/kernel/trampoline.S | 2 +- 3 files changed, 59 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c5dd6daf127..51f65054bf1 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -861,24 +862,16 @@ void init_irqwork_curcpu(void) memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct)); } -static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) +static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) { register unsigned long func __asm__("%o5"); register unsigned long arg0 __asm__("%o0"); register unsigned long arg1 __asm__("%o1"); register unsigned long arg2 __asm__("%o2"); - unsigned long page = get_zeroed_page(GFP_ATOMIC); - - if (!page) { - prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); - prom_halt(); - } - - *pa_ptr = __pa(page); func = HV_FAST_CPU_QCONF; arg0 = type; - arg1 = *pa_ptr; + arg1 = paddr; arg2 = 128; /* XXX Implied by Niagara queue offsets. XXX */ __asm__ __volatile__("ta %8" : "=&r" (func), "=&r" (arg0), @@ -887,16 +880,48 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type) "2" (arg1), "3" (arg2), "i" (HV_FAST_TRAP)); - if (func != HV_EOK) { + if (arg0 != HV_EOK) { prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n", type, func); prom_halt(); } } -static void __cpuinit init_one_kbuf(unsigned long *pa_ptr) +static void __cpuinit sun4v_register_mondo_queues(int this_cpu) { - unsigned long page = get_zeroed_page(GFP_ATOMIC); + struct trap_per_cpu *tb = &trap_block[this_cpu]; + + register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); + register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); + register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); + register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); +} + +static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) +{ + void *page; + + if (use_bootmem) + page = alloc_bootmem_low_pages(PAGE_SIZE); + else + page = (void *) get_zeroed_page(GFP_ATOMIC); + + if (!page) { + prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); + prom_halt(); + } + + *pa_ptr = __pa(page); +} + +static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) +{ + void *page; + + if (use_bootmem) + page = alloc_bootmem_low_pages(PAGE_SIZE); + else + page = (void *) get_zeroed_page(GFP_ATOMIC); if (!page) { prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); @@ -906,14 +931,18 @@ static void __cpuinit init_one_kbuf(unsigned long *pa_ptr) *pa_ptr = __pa(page); } -static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb) +static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) { #ifdef CONFIG_SMP - unsigned long page; + void *page; BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); - page = get_zeroed_page(GFP_ATOMIC); + if (use_bootmem) + page = alloc_bootmem_low_pages(PAGE_SIZE); + else + page = (void *) get_zeroed_page(GFP_ATOMIC); + if (!page) { prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); prom_halt(); @@ -924,22 +953,22 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb) #endif } -/* Allocate and init the mondo and error queues for this cpu. 
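 * (These all ride on the sun4v fast-trap convention visible in
 * register_one_mondo() above -- roughly: function number in %o5,
 * arguments in %o0 onward, "ta HV_FAST_TRAP", status returned in %o0 --
 * which is why the corrected checks test arg0 rather than func.)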
*/ -void __cpuinit sun4v_init_mondo_queues(void) +/* Allocate and register the mondo and error queues for this cpu. */ +void __cpuinit sun4v_init_mondo_queues(int use_bootmem) { int cpu = hard_smp_processor_id(); struct trap_per_cpu *tb = &trap_block[cpu]; - init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); - init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); - - init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); - init_one_kbuf(&tb->resum_kernel_buf_pa); + alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); + alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); + alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); + alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); + alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); + alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); - init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); - init_one_kbuf(&tb->nonresum_kernel_buf_pa); + init_cpu_send_mondo_info(tb, use_bootmem); - init_cpu_send_mondo_info(tb); + sun4v_register_mondo_queues(cpu); } /* Only invoked on boot processor. */ @@ -950,7 +979,7 @@ void __init init_IRQ(void) memset(&ivector_table[0], 0, sizeof(ivector_table)); if (tlb_type == hypervisor) - sun4v_init_mondo_queues(); + sun4v_init_mondo_queues(1); /* We need to clear any IRQ's pending in the soft interrupt * registers, a spurious one could be left around from the diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 7d7e02ba297..d637168ce37 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -591,7 +591,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t "2" (arg1), "3" (arg2), "i" (HV_FAST_TRAP) : "memory"); - if (likely(func == HV_EOK)) + if (likely(arg0 == HV_EOK)) break; if (unlikely(++retries > 100)) { @@ -644,7 +644,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t "2" (arg1), "3" (arg2), "i" (HV_FAST_TRAP) : "memory"); - if (likely(func == HV_EOK)) + if (likely(arg0 == HV_EOK)) break; if (unlikely(++retries > 100)) { diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 88382200c7b..22fb24eac99 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -367,7 +367,7 @@ after_lock_tlb: nop call sun4v_init_mondo_queues - nop + mov 0, %o0 1: call init_cur_cpu_trap nop -- cgit v1.2.3 From 7aa6264543f19ceea9b5f386242917296d63be05 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:14:59 -0800 Subject: [SPARC64]: Do not try to write to %tick or %stick on SUN4V. Writes by privileged code are disallowed. The hypervisor manages the non-privileged bit. Signed-off-by: David S. Miller --- arch/sparc64/kernel/time.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 24681b9b442..7041146f86f 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -193,16 +193,22 @@ struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations; static void stick_init_tick(unsigned long offset) { - tick_disable_protection(); - - /* Let the user get at STICK too. */ - __asm__ __volatile__( - " rd %%asr24, %%g2\n" - " andn %%g2, %0, %%g2\n" - " wr %%g2, 0, %%asr24" - : /* no outputs */ - : "r" (TICK_PRIV_BIT) - : "g1", "g2"); + /* Writes to the %tick and %stick register are not + * allowed on sun4v. The Hypervisor controls that + * bit, per-strand. 
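	 * (As a sketch from the sun4u path kept here: TICK_PRIV_BIT is
	 * the top bit, 1UL << 63, of %tick/%stick, and the
	 * "wr %g2, 0, %asr24" sequences below clear it so user code may
	 * read the counters directly.)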
+ */ + if (tlb_type != hypervisor) { + tick_disable_protection(); + + /* Let the user get at STICK too. */ + __asm__ __volatile__( + " rd %%asr24, %%g2\n" + " andn %%g2, %0, %%g2\n" + " wr %%g2, 0, %%asr24" + : /* no outputs */ + : "r" (TICK_PRIV_BIT) + : "g1", "g2"); + } __asm__ __volatile__( " rd %%asr24, %%g1\n" -- cgit v1.2.3 From 02fead75055246d01af56a45a9d1b311d506da3e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:22:47 -0800 Subject: [SPARC64]: Do not try to synchronize %stick registers on SUN4V. Writes by privileged code are not allowed. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index d637168ce37..2dbe008d6b7 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -1278,7 +1278,11 @@ int __devinit __cpu_up(unsigned int cpu) if (!cpu_isset(cpu, cpu_online_map)) { ret = -ENODEV; } else { - smp_synchronize_one_tick(cpu); + /* On SUN4V, writes to %tick and %stick are + * not allowed. + */ + if (tlb_type != hypervisor) + smp_synchronize_one_tick(cpu); } } return ret; -- cgit v1.2.3 From 1a7a242c898dd131f2df005c492e9b44fb8900e0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:24:30 -0800 Subject: [SPARC64]: Recognize "virtual-console" as input and output console device. Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 5 +++++ arch/sparc64/prom/console.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index ca75f3b26a3..4f253a0755b 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -405,6 +405,11 @@ static int __init set_preferred_console(void) serial_console = 2; } else if (idev == PROMDEV_IRSC && odev == PROMDEV_ORSC) { serial_console = 3; + } else if (idev == PROMDEV_IVCONS && odev == PROMDEV_OVCONS) { + /* sunhv_console_init() doesn't check the serial_console + * value anyways... + */ + serial_console = 4; } else { prom_printf("Inconsistent console: " "input %d, output %d\n", diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index ac6d035dd15..7c25c54cefd 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c @@ -102,6 +102,9 @@ prom_query_input_device(void) if (!strncmp (propb, "rsc", 3)) return PROMDEV_IRSC; + if (!strncmp (propb, "virtual-console", 3)) + return PROMDEV_IVCONS; + if (strncmp (propb, "tty", 3) || !propb[3]) return PROMDEV_I_UNK; @@ -143,6 +146,9 @@ prom_query_output_device(void) if (!strncmp (propb, "rsc", 3)) return PROMDEV_ORSC; + if (!strncmp (propb, "virtual-console", 3)) + return PROMDEV_OVCONS; + if (strncmp (propb, "tty", 3) || !propb[3]) return PROMDEV_O_UNK; -- cgit v1.2.3 From 6241e5cc6afe2c5b75b51e1c890df18f05838cf6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:28:40 -0800 Subject: [SPARC64]: Fix branch signedness bug in all code patching. The bug that hit SUN4V TLB patching exists elsewhere. Make sure we cure all such cases. Signed-off-by: David S. 
Miller --- arch/sparc64/lib/NGpage.S | 3 ++- arch/sparc64/lib/NGpatch.S | 3 ++- arch/sparc64/lib/U3patch.S | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S index 0e6152c28b0..7d7c3bb8dcb 100644 --- a/arch/sparc64/lib/NGpage.S +++ b/arch/sparc64/lib/NGpage.S @@ -75,7 +75,8 @@ NGclear_user_page: /* %o0=dest, %o1=vaddr */ or %g2, %lo(OLD), %g2; \ sub %g1, %g2, %g1; \ sethi %hi(BRANCH_ALWAYS), %g3; \ - srl %g1, 2, %g1; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ or %g3, %lo(BRANCH_ALWAYS), %g3; \ or %g3, %g1, %g3; \ stw %g3, [%g2]; \ diff --git a/arch/sparc64/lib/NGpatch.S b/arch/sparc64/lib/NGpatch.S index f13ec9e4c8a..3b0674fc336 100644 --- a/arch/sparc64/lib/NGpatch.S +++ b/arch/sparc64/lib/NGpatch.S @@ -12,7 +12,8 @@ or %g2, %lo(OLD), %g2; \ sub %g1, %g2, %g1; \ sethi %hi(BRANCH_ALWAYS), %g3; \ - srl %g1, 2, %g1; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ or %g3, %lo(BRANCH_ALWAYS), %g3; \ or %g3, %g1, %g3; \ stw %g3, [%g2]; \ diff --git a/arch/sparc64/lib/U3patch.S b/arch/sparc64/lib/U3patch.S index e2b6c5e4b95..ecc302619a6 100644 --- a/arch/sparc64/lib/U3patch.S +++ b/arch/sparc64/lib/U3patch.S @@ -12,7 +12,8 @@ or %g2, %lo(OLD), %g2; \ sub %g1, %g2, %g1; \ sethi %hi(BRANCH_ALWAYS), %g3; \ - srl %g1, 2, %g1; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ or %g3, %lo(BRANCH_ALWAYS), %g3; \ or %g3, %g1, %g3; \ stw %g3, [%g2]; \ -- cgit v1.2.3 From 221b2fb818c307e1cb47e036a1671ca554d9cd0a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 23:38:00 -0800 Subject: [SPARC64]: Don't expect cfg space in PCI PBM ranges on SUN4V. PCI cfg space is accessed transparently through the Hypervisor and not through direct cpu PIO operations. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 3f0e3c09f4d..699e91e3e42 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -639,9 +639,9 @@ static void pci_sun4v_resource_adjust(struct pci_dev *pdev, */ static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) { - int i, saw_cfg, saw_mem, saw_io; + int i, saw_mem, saw_io; - saw_cfg = saw_mem = saw_io = 0; + saw_mem = saw_io = 0; for (i = 0; i < pbm->num_pbm_ranges; i++) { struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i]; unsigned long a; @@ -652,12 +652,6 @@ static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) ((unsigned long)pr->parent_phys_lo << 0UL)); switch (type) { - case 0: - /* PCI config space, 16MB */ - pbm->config_space = a; - saw_cfg = 1; - break; - case 1: /* 16-bit IO space, 16MB */ pbm->io_space.start = a; @@ -679,19 +673,15 @@ static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) }; } - if (!saw_cfg || !saw_io || !saw_mem) { + if (!saw_io || !saw_mem) { prom_printf("%s: Fatal error, missing %s PBM range.\n", pbm->name, - ((!saw_cfg ? - "CFG" : - (!saw_io ? - "IO" : "MEM")))); + (!saw_io ? "IO" : "MEM")); prom_halt(); } - printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n", + printk("%s: PCI IO[%lx] MEM[%lx]\n", pbm->name, - pbm->config_space, pbm->io_space.start, pbm->mem_space.start); } -- cgit v1.2.3 From ff02e0d26f139ad95ec3a7e94f88faccaa180dff Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sun, 12 Feb 2006 17:07:51 -0800 Subject: [SPARC64]: Move PTE field definitions back into asm/pgtable.h Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 84 -------------------------------------------------- 1 file changed, 84 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9c2fc239f3e..81f9f4bffaf 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1334,90 +1334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -/* SUN4U pte bits... */ -#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ -#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ -#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ -#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ -#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ -#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ -#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ -#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ -#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ -#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ -#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ -#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ -#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ -#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ -#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ -#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ -#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ -#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ -#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ -#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ -#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ -#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ -#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ -#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ -#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ -#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ -#define _PAGE_W_4U 0x0000000000000002 /* Writable */ - -/* SUN4V pte bits... 
*/ -#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ -#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ -#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ -#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ -#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ -#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ -#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ -#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ -#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ -#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ -#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ -#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ -#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ -#define _PAGE_W_4V 0x0000000000000040 /* Writable */ -#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ -#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ -#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ -#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ -#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ -#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ -#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ -#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ -#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ -#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ -#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ -#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ - -#if PAGE_SHIFT == 13 -#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V -#elif PAGE_SHIFT == 16 -#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V -#elif PAGE_SHIFT == 19 -#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V -#elif PAGE_SHIFT == 22 -#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U -#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V -#else -#error Wrong PAGE_SHIFT specified -#endif - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V -#endif - #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) -- cgit v1.2.3 From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 21:10:07 -0800 Subject: [SPARC64]: Use inline patching for critical PTE operations. This handles the SUN4U vs SUN4V PTE layout differences with near zero performance cost. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 211 +------------------------------------------------ 1 file changed, 3 insertions(+), 208 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 81f9f4bffaf..6f860c39db8 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1502,217 +1502,12 @@ unsigned long pte_sz_bits(unsigned long sz) pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; - if (tlb_type == hypervisor) { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & - ~(unsigned long)_PAGE_CACHE_4V); - } else { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & - ~(unsigned long)_PAGE_CACHE_4U); - } + + pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); - return pte; -} - -unsigned long pte_present(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); -} - -unsigned long pte_file(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_FILE_4V : _PAGE_FILE_4U)); -} - -unsigned long pte_read(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_READ_4V : _PAGE_READ_4U)); -} - -unsigned long pte_exec(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_EXEC_4V : _PAGE_EXEC_4U)); -} - -unsigned long pte_write(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_WRITE_4V : _PAGE_WRITE_4U)); -} - -unsigned long pte_dirty(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); -} - -unsigned long pte_young(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? 
- _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); -} -pte_t pte_wrprotect(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_rdprotect(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_READ_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_READ_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkclean(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkold(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkyoung(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkwrite(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkdirty(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkhuge(pte_t pte) -{ - unsigned long mask = _PAGE_SZHUGE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_SZHUGE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pgoff_to_pte(unsigned long off) -{ - unsigned long bit = _PAGE_FILE_4U; - - if (tlb_type == hypervisor) - bit = _PAGE_FILE_4V; - - return __pte((off << PAGE_SHIFT) | bit); -} - -pgprot_t pgprot_noncached(pgprot_t prot) -{ - unsigned long val = pgprot_val(prot); - unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; - unsigned long on = _PAGE_E_4U; - - if (tlb_type == hypervisor) { - off = _PAGE_CP_4V | _PAGE_CV_4V; - on = _PAGE_E_4V; - } - - return __pgprot((val & ~off) | on); -} - -pte_t pfn_pte(unsigned long pfn, pgprot_t prot) -{ - unsigned long sz_bits = _PAGE_SZBITS_4U; - - if (tlb_type == hypervisor) - sz_bits = _PAGE_SZBITS_4V; - - return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); -} - -unsigned long pte_pfn(pte_t pte) -{ - unsigned long mask = _PAGE_PADDR_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_PADDR_4V; - - return (pte_val(pte) & mask) >> PAGE_SHIFT; -} - -pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) -{ - unsigned long preserve_mask; - unsigned long val; - - preserve_mask = (_PAGE_PADDR_4U | - _PAGE_MODIFIED_4U | - _PAGE_ACCESSED_4U | - _PAGE_CP_4U | - _PAGE_CV_4U | - _PAGE_E_4U | - _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U); - if (tlb_type == hypervisor) - preserve_mask = (_PAGE_PADDR_4V | - _PAGE_MODIFIED_4V | - _PAGE_ACCESSED_4V | - _PAGE_CP_4V | - _PAGE_CV_4V | - _PAGE_E_4V | - _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V); - - val = (pte_val(orig_pte) & preserve_mask); - - return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); + return pte; } static unsigned long kern_large_tte(unsigned long paddr) -- cgit v1.2.3 From 3833789bb2e15eb85fad296d8fb40f1437925645 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 22:06:53 -0800 Subject: [SPARC64]: PCI-SUN4V fixes. Clear top 8-bits of physical addresses in "ranges" property. This gives the actual physical address. Detect PBM-A vs. PBM-B by checking bit 0x40 of the devhandle. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_sun4v.c | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 699e91e3e42..19a07f0115c 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -776,22 +776,22 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) probe_existing_entries(pbm, iommu); } -static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node) +static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle) { struct pci_pbm_info *pbm; - struct linux_prom64_registers regs; unsigned int busrange[2]; - int err; + int err, i; - /* XXX */ - pbm = &p->pbm_A; + if (devhandle & 0x40) + pbm = &p->pbm_B; + else + pbm = &p->pbm_A; pbm->parent = p; pbm->prom_node = prom_node; pbm->pci_first_slot = 1; - prom_getproperty(prom_node, "reg", (char *)®s, sizeof(regs)); - pbm->devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; + pbm->devhandle = devhandle; sprintf(pbm->name, "SUN4V-PCI%d PBM%c", p->index, (pbm == &p->pbm_A ? 'A' : 'B')); @@ -813,6 +813,12 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node) pbm->num_pbm_ranges = (err / sizeof(struct linux_prom_pci_ranges)); + /* Mask out the top 8 bits of the ranges, leaving the real + * physical address. + */ + for (i = 0; i < pbm->num_pbm_ranges; i++) + pbm->pbm_ranges[i].parent_phys_hi &= 0x0fffffff; + pci_sun4v_determine_mem_io_space(pbm); pbm_register_toplevel_resources(p, pbm); @@ -851,6 +857,25 @@ void sun4v_pci_init(int node, char *model_name) { struct pci_controller_info *p; struct pci_iommu *iommu; + struct linux_prom64_registers regs; + unsigned int devhandle; + + prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); + devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;; + + for (p = pci_controller_root; p; p = p->next) { + struct pci_pbm_info *pbm; + + if (p->pbm_A.prom_node && p->pbm_B.prom_node) + continue; + + pbm = (p->pbm_A.prom_node ? + &p->pbm_A : + &p->pbm_B); + + if (pbm->devhandle == (devhandle ^ 0x40)) + pci_sun4v_pbm_init(p, node, devhandle); + } p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); if (!p) { @@ -892,7 +917,7 @@ void sun4v_pci_init(int node, char *model_name) */ pci_memspace_mask = 0x7fffffffUL; - pci_sun4v_pbm_init(p, node); + pci_sun4v_pbm_init(p, node, devhandle); prom_printf("sun4v_pci_init: Implement me.\n"); prom_halt(); -- cgit v1.2.3 From c26092675020ff495a16dd635bf1733215325540 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 22:18:52 -0800 Subject: [SPARC64]: Implement basic pci_sun4v_scan_bus(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 49 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 19a07f0115c..59e660d849a 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -566,9 +566,50 @@ static struct pci_ops pci_sun4v_ops = { }; +static void pbm_scan_bus(struct pci_controller_info *p, + struct pci_pbm_info *pbm) +{ + struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); + + if (!cookie) { + prom_printf("%s: Critical allocation failure.\n", pbm->name); + prom_halt(); + } + + /* All we care about is the PBM. 
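 * (a zeroed cookie suffices because, presumably, only the ->pbm
 * back-pointer is ever consulted for the host bridge itself; the
 * remaining pcidev_cookie fields describe per-device PROM data that
 * a bridge synthesized this way does not have)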
*/ + memset(cookie, 0, sizeof(*cookie)); + cookie->pbm = pbm; + + pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, + p->pci_ops, + pbm); + pci_fixup_host_bridge_self(pbm->pci_bus); + pbm->pci_bus->self->sysdata = cookie; + + pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); + pci_record_assignments(pbm, pbm->pci_bus); + pci_assign_unassigned(pbm, pbm->pci_bus); + pci_fixup_irq(pbm, pbm->pci_bus); + pci_determine_66mhz_disposition(pbm, pbm->pci_bus); + pci_setup_busmastering(pbm, pbm->pci_bus); +} + static void pci_sun4v_scan_bus(struct pci_controller_info *p) { - /* XXX Implement me! XXX */ + if (p->pbm_A.prom_node) { + p->pbm_A.is_66mhz_capable = + prom_getbool(p->pbm_A.prom_node, "66mhz-capable"); + + pbm_scan_bus(p, &p->pbm_A); + } + if (p->pbm_B.prom_node) { + p->pbm_B.is_66mhz_capable = + prom_getbool(p->pbm_B.prom_node, "66mhz-capable"); + + pbm_scan_bus(p, &p->pbm_B); + } + + /* XXX register error interrupt handlers XXX */ } static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, @@ -579,7 +620,6 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, return 0; } -/* XXX correct? XXX */ static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) { struct pcidev_cookie *pcp = pdev->sysdata; @@ -598,6 +638,7 @@ static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) return; } + /* XXX 64-bit MEM handling is not %100 correct... XXX */ is_64bit = 0; if (res->flags & IORESOURCE_IO) root = &pbm->io_space; @@ -625,7 +666,6 @@ static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) pci_write_config_dword(pdev, where + 4, 0); } -/* XXX correct? XXX */ static void pci_sun4v_resource_adjust(struct pci_dev *pdev, struct resource *res, struct resource *root) @@ -668,6 +708,9 @@ static void pci_sun4v_determine_mem_io_space(struct pci_pbm_info *pbm) saw_mem = 1; break; + case 3: + /* XXX 64-bit MEM handling XXX */ + default: break; }; -- cgit v1.2.3 From 0b522497a176f222ae4cf7e6733a5357352224b2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 22:29:36 -0800 Subject: [SPARC64]: Missing 'return' statement in sun4v_pci_init(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 59e660d849a..7055616e083 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -916,8 +916,10 @@ void sun4v_pci_init(int node, char *model_name) &p->pbm_A : &p->pbm_B); - if (pbm->devhandle == (devhandle ^ 0x40)) + if (pbm->devhandle == (devhandle ^ 0x40)) { pci_sun4v_pbm_init(p, node, devhandle); + return; + } } p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); @@ -961,7 +963,4 @@ void sun4v_pci_init(int node, char *model_name) pci_memspace_mask = 0x7fffffffUL; pci_sun4v_pbm_init(p, node, devhandle); - - prom_printf("sun4v_pci_init: Implement me.\n"); - prom_halt(); } -- cgit v1.2.3 From 059833eb817fec3a5a7f62fba9592749c4cebc73 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 23:49:18 -0800 Subject: [SPARC64]: Range check bus number in SUN4V PCI controller driver. It has to be somewhere in the range from pbm->pci_first_busno to pbm->pci_last_busno, inclusive. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_sun4v.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 7055616e083..dc79b748fea 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -514,19 +514,31 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { /* SUN4V PCI configuration space accessors. */ +static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus) +{ + if (bus < pbm->pci_first_busno || + bus > pbm->pci_last_busno) + return 1; + return 0; +} + static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; - unsigned long devhandle = pbm->devhandle; + u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; - ret = pci_sun4v_config_get(devhandle, - HV_PCI_DEVICE_BUILD(bus, device, func), - where, size); + if (pci_sun4v_out_of_range(pbm, bus)) { + ret = ~0UL; + } else { + ret = pci_sun4v_config_get(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size); + } switch (size) { case 1: *value = ret & 0xff; @@ -547,16 +559,19 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; - unsigned long devhandle = pbm->devhandle; + u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; - ret = pci_sun4v_config_put(devhandle, - HV_PCI_DEVICE_BUILD(bus, device, func), - where, size, value); - + if (pci_sun4v_out_of_range(pbm, bus)) { + /* Do nothing. */ + } else { + ret = pci_sun4v_config_put(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value); + } return PCIBIOS_SUCCESSFUL; } -- cgit v1.2.3 From 85dfa19ba92f88fa1c1482f655c7247119dfdcd5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 00:02:16 -0800 Subject: [SPARC64]: Move devino_to_sysino out of pci_sun4v_asm.S It is not PCI specific, it is for all system interrupts. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/entry.S | 12 ++++++++++++ arch/sparc64/kernel/pci_sun4v.h | 2 -- arch/sparc64/kernel/pci_sun4v_asm.S | 12 ------------ 3 files changed, 12 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index f51b66a1687..fa185f22705 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1695,3 +1695,15 @@ hard_smp_processor_id: retl nop #endif + + /* %o0: devhandle + * %o1: devino + * + * returns %o0: sysino + */ + .globl pci_sun4v_devino_to_sysino +sun4v_devino_to_sysino: + mov HV_FAST_INTR_DEVINO2SYSINO, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h index 00322ed0cf8..88f199e11a7 100644 --- a/arch/sparc64/kernel/pci_sun4v.h +++ b/arch/sparc64/kernel/pci_sun4v.h @@ -6,8 +6,6 @@ #ifndef _PCI_SUN4V_H #define _PCI_SUN4V_H -extern unsigned long pci_sun4v_devino_to_sysino(unsigned long devhandle, - unsigned long deino); extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle, unsigned long tsbid, unsigned long num_ttes, diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S index 4a12341dd5d..424db652664 100644 --- a/arch/sparc64/kernel/pci_sun4v_asm.S +++ b/arch/sparc64/kernel/pci_sun4v_asm.S @@ -5,18 +5,6 @@ #include - /* %o0: devhandle - * %o1: devino - * - * returns %o0: sysino - */ - .globl pci_sun4v_devino_to_sysino -pci_sun4v_devino_to_sysino: - mov HV_FAST_INTR_DEVINO2SYSINO, %o5 - ta HV_FAST_TRAP - retl - mov %o1, %o0 - /* %o0: devhandle * %o1: tsbid * %o2: num ttes -- cgit v1.2.3 From 6c0f402f6cc62314ef83b975f3430350dcb6055f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 00:23:32 -0800 Subject: [SPARC64]: Implement rest of generic interrupt hypervisor calls. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/entry.S | 66 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 65 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index fa185f22705..a2842a72f8e 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1701,9 +1701,73 @@ hard_smp_processor_id: * * returns %o0: sysino */ - .globl pci_sun4v_devino_to_sysino + .globl sun4v_devino_to_sysino sun4v_devino_to_sysino: mov HV_FAST_INTR_DEVINO2SYSINO, %o5 ta HV_FAST_TRAP retl mov %o1, %o0 + + /* %o0: sysino + * + * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED}) + */ + .globl sun4v_intr_getenabled +sun4v_intr_getenabled: + mov HV_FAST_INTR_GETENABLED, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 + + /* %o0: sysino + * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) + */ + .globl sun4v_intr_setenabled +sun4v_intr_setenabled: + mov HV_FAST_INTR_SETENABLED, %o5 + ta HV_FAST_TRAP + retl + nop + + /* %o0: sysino + * + * returns %o0: intr_state (HV_INTR_STATE_*) + */ + .globl sun4v_intr_getstate +sun4v_intr_getstate: + mov HV_FAST_INTR_GETSTATE, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 + + /* %o0: sysino + * %o1: intr_state (HV_INTR_STATE_*) + */ + .globl sun4v_intr_setstate +sun4v_intr_setstate: + mov HV_FAST_INTR_SETSTATE, %o5 + ta HV_FAST_TRAP + retl + nop + + /* %o0: sysino + * + * returns %o0: cpuid + */ + .globl sun4v_intr_gettarget +sun4v_intr_gettarget: + mov HV_FAST_INTR_GETTARGET, %o5 + ta HV_FAST_TRAP + retl + mov %o1, %o0 + + /* %o0: sysino + * %o1: cpuid + */ + .globl sun4v_intr_settarget +sun4v_intr_settarget: + mov HV_FAST_INTR_SETTARGET, %o5 + ta HV_FAST_TRAP + retl + nop + -- cgit v1.2.3 From bf941d6cd62aa2022f0887e25e3d02c389b0bf9b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:07:45 -0800 Subject: [SPARC64]: Log faulting vaddr when bogus kernel PC detected. Signed-off-by: David S. Miller --- arch/sparc64/mm/fault.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 439a53c1e56..b97bd054aad 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -91,12 +91,13 @@ static void __kprobes unhandled_fault(unsigned long address, die_if_kernel("Oops", regs); } -static void bad_kernel_pc(struct pt_regs *regs) +static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) { unsigned long *ksp; printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); __asm__("mov %%sp, %0" : "=r" (ksp)); show_stack(current, ksp); unhandled_fault(regs->tpc, current, regs); @@ -280,7 +281,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) (tpc >= MODULES_VADDR && tpc < MODULES_END)) { /* Valid, no problems... */ } else { - bad_kernel_pc(regs); + bad_kernel_pc(regs, address); return; } } -- cgit v1.2.3 From 10804828fd06a43ce964e9d9852332e7ff1507b1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:09:44 -0800 Subject: [SPARC64]: More SUN4V PCI work. Get bus range from child of PCI controller root nexus. This is actually a hack, but the PCI-E bridge sitting at the top of the PCI tree responds to PCI config cycles for every device number, so best to just ignore it for now. Preliminary PCI irq routing, needs lots of work. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_sun4v.c | 116 +++++++++++++++++++++++++++++++++++----- 1 file changed, 102 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index dc79b748fea..5174346ce35 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -538,6 +538,12 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, ret = pci_sun4v_config_get(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size); +#if 0 + printk("read_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] " + "== [%016lx]\n", + devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, ret); +#endif } switch (size) { case 1: @@ -571,6 +577,12 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, ret = pci_sun4v_config_put(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value); +#if 0 + printk("write_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] " + "val[%08x] == [%016lx]\n", + devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value, ret); +#endif } return PCIBIOS_SUCCESSFUL; } @@ -598,10 +610,13 @@ static void pbm_scan_bus(struct pci_controller_info *p, pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); +#if 0 pci_fixup_host_bridge_self(pbm->pci_bus); pbm->pci_bus->self->sysdata = cookie; +#endif - pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node); + pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, + prom_getchild(pbm->prom_node)); pci_record_assignments(pbm, pbm->pci_bus); pci_assign_unassigned(pbm, pbm->pci_bus); pci_fixup_irq(pbm, pbm->pci_bus); @@ -631,8 +646,65 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, struct pci_dev *pdev, unsigned int ino) { - /* XXX Implement me! XXX */ - return 0; + struct ino_bucket *bucket; + unsigned long sysino; + u32 devhandle = pbm->devhandle; + int pil; + + sysino = sun4v_devino_to_sysino(devhandle, ino); + + printk(KERN_INFO "pci_irq_buld: Mapping ( devh[%08x] ino[%08x] ) " + "--> sysino[%016lx]\n", devhandle, ino, sysino); + + pil = 4; + if (pdev) { + switch ((pdev->class >> 16) & 0xff) { + case PCI_BASE_CLASS_STORAGE: + pil = 4; + break; + + case PCI_BASE_CLASS_NETWORK: + pil = 6; + break; + + case PCI_BASE_CLASS_DISPLAY: + pil = 9; + break; + + case PCI_BASE_CLASS_MULTIMEDIA: + case PCI_BASE_CLASS_MEMORY: + case PCI_BASE_CLASS_BRIDGE: + case PCI_BASE_CLASS_SERIAL: + pil = 10; + break; + + default: + pil = 4; + break; + }; + } + BUG_ON(PIL_RESERVED(pil)); + + bucket = &ivector_table[sysino]; + + /* Catch accidental accesses to these things. IMAP/ICLR handling + * is done by hypervisor calls on sun4v platforms, not by direct + * register accesses. + */ + bucket->imap = ~0UL; + bucket->iclr = ~0UL; + + bucket->pil = pil; + bucket->flags = IBF_PCI; + + bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); + if (!bucket->irq_info) { + prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); + prom_halt(); + } + memset(bucket->irq_info, 0, sizeof(struct irq_desc)); + + return __irq(bucket); } static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) @@ -834,10 +906,35 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) probe_existing_entries(pbm, iommu); } +/* Don't get this from the root nexus, get it from the "pci@0" node below. 
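 * ("bus-range" follows the standard OBP PCI binding: two 32-bit cells,
 * { first-busno, last-busno }, so busrange[0]/busrange[1] below map
 * straight onto pci_first_busno/pci_last_busno)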
*/ +static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) +{ + unsigned int busrange[2]; + int prom_node = pbm->prom_node; + int err; + + prom_node = prom_getchild(prom_node); + if (prom_node == 0) { + prom_printf("%s: Fatal error, no child OBP node.\n", pbm->name); + prom_halt(); + } + + err = prom_getproperty(prom_node, "bus-range", + (char *)&busrange[0], + sizeof(busrange)); + if (err == 0 || err == -1) { + prom_printf("%s: Fatal error, no bus-range.\n", pbm->name); + prom_halt(); + } + + pbm->pci_first_busno = busrange[0]; + pbm->pci_last_busno = busrange[1]; + +} + static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle) { struct pci_pbm_info *pbm; - unsigned int busrange[2]; int err, i; if (devhandle & 0x40) @@ -898,16 +995,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, uns memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask)); } - err = prom_getproperty(prom_node, "bus-range", - (char *)&busrange[0], - sizeof(busrange)); - if (err == 0 || err == -1) { - prom_printf("%s: Fatal error, no bus-range.\n", pbm->name); - prom_halt(); - } - pbm->pci_first_busno = busrange[0]; - pbm->pci_last_busno = busrange[1]; - + pci_sun4v_get_bus_range(pbm); pci_sun4v_iommu_init(pbm); } -- cgit v1.2.3 From e3999574b48125c9bb0c95e3e9f1c696bf96c3e3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:16:10 -0800 Subject: [SPARC64]: Generic sun4v_build_irq(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 32 ++++++++++++++++++++++++++++++++ arch/sparc64/kernel/pci_sun4v.c | 30 ++---------------------------- 2 files changed, 34 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 51f65054bf1..bcc889a5332 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -303,6 +303,38 @@ out: return __irq(bucket); } +unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags) +{ + struct ino_bucket *bucket; + unsigned long sysino; + + sysino = sun4v_devino_to_sysino(devhandle, devino); + + printk(KERN_INFO "sun4v_irq: Mapping ( devh[%08x] devino[%08x] ) " + "--> sysino[%016lx]\n", devhandle, devino, sysino); + + bucket = &ivector_table[sysino]; + + /* Catch accidental accesses to these things. IMAP/ICLR handling + * is done by hypervisor calls on sun4v platforms, not by direct + * register accesses. 
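	 * As a sketch, the hypervisor-side equivalents wired up in
	 * enable_irq() further below are:
	 *
	 *	sun4v_intr_settarget(irq, hard_smp_processor_id());
	 *	sun4v_intr_setenabled(irq, HV_INTR_ENABLED);
	 *
	 * so the ~0UL values poison any leftover imap/iclr users.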
+ */ + bucket->imap = ~0UL; + bucket->iclr = ~0UL; + + bucket->pil = pil; + bucket->flags = flags; + + bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); + if (!bucket->irq_info) { + prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); + prom_halt(); + } + memset(bucket->irq_info, 0, sizeof(struct irq_desc)); + + return __irq(bucket); +} + static void atomic_bucket_insert(struct ino_bucket *bucket) { unsigned long pstate; diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 5174346ce35..b8846b271f9 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -644,18 +644,11 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p) static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, struct pci_dev *pdev, - unsigned int ino) + unsigned int devino) { - struct ino_bucket *bucket; - unsigned long sysino; u32 devhandle = pbm->devhandle; int pil; - sysino = sun4v_devino_to_sysino(devhandle, ino); - - printk(KERN_INFO "pci_irq_buld: Mapping ( devh[%08x] ino[%08x] ) " - "--> sysino[%016lx]\n", devhandle, ino, sysino); - pil = 4; if (pdev) { switch ((pdev->class >> 16) & 0xff) { @@ -685,26 +678,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, } BUG_ON(PIL_RESERVED(pil)); - bucket = &ivector_table[sysino]; - - /* Catch accidental accesses to these things. IMAP/ICLR handling - * is done by hypervisor calls on sun4v platforms, not by direct - * register accesses. - */ - bucket->imap = ~0UL; - bucket->iclr = ~0UL; - - bucket->pil = pil; - bucket->flags = IBF_PCI; - - bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); - if (!bucket->irq_info) { - prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); - prom_halt(); - } - memset(bucket->irq_info, 0, sizeof(struct irq_desc)); - - return __irq(bucket); + return sun4v_build_irq(devhandle, devino, pil, IBF_PCI); } static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) -- cgit v1.2.3 From 10951ee61056a9f91c00c16746f2042672d7af7c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:22:57 -0800 Subject: [SPARC64]: Program IRQ registers correctly on sun4v. Need to use hypervisor calls instead of direct register accesses. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 71 +++++++++++++++++++++++++++++++---------------- 1 file changed, 47 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index bcc889a5332..735b3abb85e 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -152,7 +152,10 @@ void enable_irq(unsigned int irq) preempt_disable(); if (tlb_type == hypervisor) { - /* XXX SUN4V: implement me... XXX */ + int cpu = hard_smp_processor_id(); + + sun4v_intr_settarget(irq, cpu); + sun4v_intr_setenabled(irq, HV_INTR_ENABLED); } else { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; @@ -210,16 +213,20 @@ void disable_irq(unsigned int irq) imap = bucket->imap; if (imap != 0UL) { - u32 tmp; + if (tlb_type == hypervisor) { + sun4v_intr_setenabled(irq, HV_INTR_DISABLED); + } else { + u32 tmp; - /* NOTE: We do not want to futz with the IRQ clear registers - * and move the state to IDLE, the SCSI code does call - * disable_irq() to assure atomicity in the queue cmd - * SCSI adapter driver code. Thus we'd lose interrupts. 
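 * (the sun4v analogue of this guarantee is the per-sysino state
 * machine -- HV_INTR_STATE_{IDLE,RECEIVED,DELIVERED}, read and set
 * via sun4v_intr_getstate()/sun4v_intr_setstate() -- where disabling
 * with HV_INTR_DISABLED presumably leaves a RECEIVED interrupt
 * pending rather than dropping it; a sketch from the sun4v intr API)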
- */ - tmp = upa_readl(imap); - tmp &= ~IMAP_VALID; - upa_writel(tmp, imap); + /* NOTE: We do not want to futz with the IRQ clear registers + * and move the state to IDLE, the SCSI code does call + * disable_irq() to assure atomicity in the queue cmd + * SCSI adapter driver code. Thus we'd lose interrupts. + */ + tmp = upa_readl(imap); + tmp &= ~IMAP_VALID; + upa_writel(tmp, imap); + } } } @@ -257,6 +264,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long return __irq(&pil0_dummy_bucket); } + BUG_ON(tlb_type == hypervisor); + /* RULE: Both must be specified in all other cases. */ if (iclr == 0UL || imap == 0UL) { prom_printf("Invalid build_irq %d %d %016lx %016lx\n", @@ -633,10 +642,16 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) break; } if (bp->pil != 0) { - upa_writel(ICLR_IDLE, bp->iclr); - /* Test and add entropy */ - if (random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); + if (tlb_type == hypervisor) { + unsigned int irq = __irq(bp); + + sun4v_intr_setstate(irq, HV_INTR_STATE_IDLE); + } else { + upa_writel(ICLR_IDLE, bp->iclr); + /* Test and add entropy */ + if (random & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + } } out: bp->flags &= ~IBF_INPROGRESS; @@ -769,24 +784,32 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) { struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; unsigned long imap = bucket->imap; - unsigned int tid; while (!cpu_online(goal_cpu)) { if (++goal_cpu >= NR_CPUS) goal_cpu = 0; } - if (tlb_type == cheetah || tlb_type == cheetah_plus) { - tid = goal_cpu << 26; - tid &= IMAP_AID_SAFARI; - } else if (this_is_starfire == 0) { - tid = goal_cpu << 26; - tid &= IMAP_TID_UPA; + if (tlb_type == hypervisor) { + unsigned int irq = __irq(bucket); + + sun4v_intr_settarget(irq, goal_cpu); + sun4v_intr_setenabled(irq, HV_INTR_ENABLED); } else { - tid = (starfire_translate(imap, goal_cpu) << 26); - tid &= IMAP_TID_UPA; + unsigned int tid; + + if (tlb_type == cheetah || tlb_type == cheetah_plus) { + tid = goal_cpu << 26; + tid &= IMAP_AID_SAFARI; + } else if (this_is_starfire == 0) { + tid = goal_cpu << 26; + tid &= IMAP_TID_UPA; + } else { + tid = (starfire_translate(imap, goal_cpu) << 26); + tid &= IMAP_TID_UPA; + } + upa_writel(tid | IMAP_VALID, imap); } - upa_writel(tid | IMAP_VALID, imap); do { if (++goal_cpu >= NR_CPUS) -- cgit v1.2.3 From d5eb4004303b4dd04ec83b926b5fc2d9ceda4b2e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 20:41:11 -0800 Subject: [SPARC64]: Kill spurious semicolon in sun4v_pci_init(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index b8846b271f9..17080a61ad6 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -981,7 +981,7 @@ void sun4v_pci_init(int node, char *model_name) unsigned int devhandle; prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); - devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff;; + devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; for (p = pci_controller_root; p; p = p->next) { struct pci_pbm_info *pbm; -- cgit v1.2.3 From e77227eb4e17591a6a511b9c0ff6e8ad7350c575 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 20:42:16 -0800 Subject: [SPARC64]: Probe virtual-devices root node on sun4v. 
This is where we learn how to get the interrupts for things like the hypervisor console device. Signed-off-by: David S. Miller --- arch/sparc64/kernel/devices.c | 58 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index ac11d872ef7..817132826e0 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include /* Used to synchronize acceses to NatSemi SUPER I/O chip configure * operations in asm/ns87303.h @@ -29,6 +31,61 @@ DEFINE_SPINLOCK(ns87303_lock); extern void cpu_probe(void); extern void central_probe(void); +u32 sun4v_vdev_devhandle; +int sun4v_vdev_root; +struct linux_prom_pci_intmap *sun4v_vdev_intmap; +int sun4v_vdev_num_intmap; +struct linux_prom_pci_intmap sun4v_vdev_intmask; + +static void __init sun4v_virtual_device_probe(void) +{ + struct linux_prom64_registers regs; + struct linux_prom_pci_intmap *ip; + int node, sz, err; + + if (tlb_type != hypervisor) + return; + + node = prom_getchild(prom_root_node); + node = prom_searchsiblings(node, "virtual-devices"); + if (!node) { + prom_printf("SUN4V: Fatal error, no virtual-devices node.\n"); + prom_halt(); + } + + sun4v_vdev_root = node; + + prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); + sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; + + sz = sizeof(*ip) * 64; + sun4v_vdev_intmap = ip = alloc_bootmem_low_pages(sz); + if (!sun4v_vdev_intmap) { + prom_printf("SUN4V: Error, cannot allocate vdev intmap.\n"); + prom_halt(); + } + + err = prom_getproperty(node, "interrupt-map", (char *) ip, sz); + if (err == -1) { + prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n"); + prom_halt(); + } + + sun4v_vdev_num_intmap = err / sizeof(*ip); + + err = prom_getproperty(node, "interrupt-map-mask", + (char *) &sun4v_vdev_intmask, + sizeof(sun4v_vdev_intmask)); + if (err == -1) { + prom_printf("SUN4V: Fatal error, no vdev " + "interrupt-map-mask.\n"); + prom_halt(); + } + + printk("SUN4V: virtual-devices devhandle[%x]\n", + sun4v_vdev_devhandle); +} + static const char *cpu_mid_prop(void) { if (tlb_type == spitfire) @@ -177,6 +234,7 @@ void __init device_scan(void) } #endif + sun4v_virtual_device_probe(); central_probe(); cpu_probe(); -- cgit v1.2.3 From d5a2aa241aa0babf382d42d6033b30a5112e4c1e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 21:28:40 -0800 Subject: [SPARC64] sunhv: Bug fixes. Add udelay to polling console write loop, and increment the loop limit. Name the device "ttyHV" and pass that to add_preferred_console() when we're using hypervisor console. Kill sunhv_console_setup(), it's empty. Handle the case where we don't want to use hypervisor console. (ie. we have a head attached to a sun4v machine) Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 4f253a0755b..06807cf95ee 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -410,6 +410,7 @@ static int __init set_preferred_console(void) * value anyways... */ serial_console = 4; + return add_preferred_console("ttyHV", 0, NULL); } else { prom_printf("Inconsistent console: " "input %d, output %d\n", -- cgit v1.2.3 From 87bdc367ca1a7e16c29a6bff6b1e8fe179e27f90 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Mon, 13 Feb 2006 21:36:30 -0800 Subject: [SPARC64]: Trim down sun4v IRQ translation kernel log message. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 735b3abb85e..a55177e0643 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -319,8 +319,8 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsign sysino = sun4v_devino_to_sysino(devhandle, devino); - printk(KERN_INFO "sun4v_irq: Mapping ( devh[%08x] devino[%08x] ) " - "--> sysino[%016lx]\n", devhandle, devino, sysino); + printk(KERN_INFO "sun4v_irq: Mapping (%x:%x) --> sysino[%lx]\n", + devhandle, devino, sysino); bucket = &ivector_table[sysino]; -- cgit v1.2.3 From 7c8f486ae7fe90d7bb99a70a42d71c9a40688ec2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 21:50:27 -0800 Subject: [SPARC64]: Fix IOMMU mapping on sun4v. We should dynamically allocate the per-cpu pglist not use an in-kernel-image datum, since __pa() does not work on such addresses. Also, consistently use "u32" for devhandle. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 76 +++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 17080a61ad6..ac311d3dbc5 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -24,10 +24,10 @@ #include "pci_sun4v.h" -#define PGLIST_NENTS 2048 +#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) struct sun4v_pglist { - u64 pglist[PGLIST_NENTS]; + u64 *pglist; }; static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists); @@ -83,10 +83,11 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr { struct pcidev_cookie *pcp; struct pci_iommu *iommu; - unsigned long devhandle, flags, order, first_page, npages, n; + unsigned long flags, order, first_page, npages, n; void *ret; long entry; u64 *pglist; + u32 devhandle; int cpu; size = IO_PAGE_ALIGN(size); @@ -123,7 +124,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr cpu = get_cpu(); - pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + pglist = __get_cpu_var(iommu_pglists).pglist; for (n = 0; n < npages; n++) pglist[n] = first_page + (n * PAGE_SIZE); @@ -149,7 +150,8 @@ static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, { struct pcidev_cookie *pcp; struct pci_iommu *iommu; - unsigned long flags, order, npages, entry, devhandle; + unsigned long flags, order, npages, entry; + u32 devhandle; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; pcp = pdev->sysdata; @@ -182,8 +184,8 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, struct pcidev_cookie *pcp; struct pci_iommu *iommu; unsigned long flags, npages, oaddr; - unsigned long i, base_paddr, devhandle; - u32 bus_addr, ret; + unsigned long i, base_paddr; + u32 devhandle, bus_addr, ret; unsigned long prot; long entry; u64 *pglist; @@ -219,7 +221,7 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, cpu = get_cpu(); - pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + pglist = __get_cpu_var(iommu_pglists).pglist; for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) pglist[i] = base_paddr; @@ -248,8 +250,9 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, 
dma_addr_t bus_addr, size_ { struct pcidev_cookie *pcp; struct pci_iommu *iommu; - unsigned long flags, npages, devhandle; + unsigned long flags, npages; long entry; + u32 devhandle; if (unlikely(direction == PCI_DMA_NONE)) { if (printk_ratelimit()) @@ -285,7 +288,7 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ #define SG_ENT_PHYS_ADDRESS(SG) \ (__pa(page_address((SG)->page)) + (SG)->offset) -static inline void fill_sg(long entry, unsigned long devhandle, +static inline void fill_sg(long entry, u32 devhandle, struct scatterlist *sg, int nused, int nelems, unsigned long prot) { @@ -295,7 +298,7 @@ static inline void fill_sg(long entry, unsigned long devhandle, u64 *pglist; cpu = get_cpu(); - pglist = &__get_cpu_var(iommu_pglists).pglist[0]; + pglist = __get_cpu_var(iommu_pglists).pglist; pglist_ent = 0; for (i = 0; i < nused; i++) { unsigned long pteval = ~0UL; @@ -380,8 +383,8 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n { struct pcidev_cookie *pcp; struct pci_iommu *iommu; - unsigned long flags, npages, prot, devhandle; - u32 dma_base; + unsigned long flags, npages, prot; + u32 devhandle, dma_base; struct scatterlist *sgtmp; long entry; int used; @@ -451,9 +454,9 @@ static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, in { struct pcidev_cookie *pcp; struct pci_iommu *iommu; - unsigned long flags, i, npages, devhandle; + unsigned long flags, i, npages; long entry; - u32 bus_addr; + u32 devhandle, bus_addr; if (unlikely(direction == PCI_DMA_NONE)) { if (printk_ratelimit()) @@ -805,7 +808,8 @@ static void probe_existing_entries(struct pci_pbm_info *pbm, struct pci_iommu *iommu) { struct pci_iommu_arena *arena = &iommu->arena; - unsigned long i, devhandle; + unsigned long i; + u32 devhandle; devhandle = pbm->devhandle; for (i = 0; i < arena->limit; i++) { @@ -906,7 +910,7 @@ static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) } -static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, unsigned int devhandle) +static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 devhandle) { struct pci_pbm_info *pbm; int err, i; @@ -978,7 +982,8 @@ void sun4v_pci_init(int node, char *model_name) struct pci_controller_info *p; struct pci_iommu *iommu; struct linux_prom64_registers regs; - unsigned int devhandle; + u32 devhandle; + int i; prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; @@ -999,26 +1004,32 @@ void sun4v_pci_init(int node, char *model_name) } } - p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); - if (!p) { - prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); - prom_halt(); + for (i = 0; i < NR_CPUS; i++) { + unsigned long page = get_zeroed_page(GFP_ATOMIC); + + if (!page) + goto fatal_memory_error; + + per_cpu(iommu_pglists, i).pglist = (u64 *) page; } + + p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); + if (!p) + goto fatal_memory_error; + memset(p, 0, sizeof(*p)); iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); - if (!iommu) { - prom_printf("SCHIZO: Fatal memory allocation error.\n"); - prom_halt(); - } + if (!iommu) + goto fatal_memory_error; + memset(iommu, 0, sizeof(*iommu)); p->pbm_A.iommu = iommu; iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); - if (!iommu) { - prom_printf("SCHIZO: Fatal memory allocation error.\n"); - prom_halt(); - } + if (!iommu) + goto fatal_memory_error; + memset(iommu, 0, sizeof(*iommu)); p->pbm_B.iommu 
= iommu; @@ -1040,4 +1051,9 @@ void sun4v_pci_init(int node, char *model_name) pci_memspace_mask = 0x7fffffffUL; pci_sun4v_pbm_init(p, node, devhandle); + return; + +fatal_memory_error: + prom_printf("SUN4V_PCI: Fatal memory allocation error.\n"); + prom_halt(); } -- cgit v1.2.3 From a615fea48be4eada94986d63e3e8ee5563121649 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 22:37:07 -0800 Subject: [SPARC64]: Use TRAP_LOAD_IRQ_WORK() in sun4v device mondo handler. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_ivec.S | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S index c0367ef7e09..b49a68bdda4 100644 --- a/arch/sparc64/kernel/sun4v_ivec.S +++ b/arch/sparc64/kernel/sun4v_ivec.S @@ -98,10 +98,7 @@ sun4v_dev_mondo: membar #Sync /* Get &__irq_work[smp_processor_id()] into %g1. */ - sethi %hi(__irq_work), %g4 - sllx %g1, 6, %g1 - or %g4, %lo(__irq_work), %g4 - add %g4, %g1, %g1 + TRAP_LOAD_IRQ_WORK(%g1, %g4) /* Get &ivector_table[IVEC] into %g4. */ sethi %hi(ivector_table), %g4 -- cgit v1.2.3 From 4bf447d6f7c2357dec8bdc24ce0fcffd71cc29c0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 22:37:32 -0800 Subject: [SPARC64]: Pass correct ino to sun4v_intr_*(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index a55177e0643..c57b1708ae8 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -152,10 +152,11 @@ void enable_irq(unsigned int irq) preempt_disable(); if (tlb_type == hypervisor) { + unsigned int ino = __irq_ino(irq); int cpu = hard_smp_processor_id(); - sun4v_intr_settarget(irq, cpu); - sun4v_intr_setenabled(irq, HV_INTR_ENABLED); + sun4v_intr_settarget(ino, cpu); + sun4v_intr_setenabled(ino, HV_INTR_ENABLED); } else { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; @@ -214,7 +215,9 @@ void disable_irq(unsigned int irq) imap = bucket->imap; if (imap != 0UL) { if (tlb_type == hypervisor) { - sun4v_intr_setenabled(irq, HV_INTR_DISABLED); + unsigned int ino = __irq_ino(irq); + + sun4v_intr_setenabled(ino, HV_INTR_DISABLED); } else { u32 tmp; @@ -643,9 +646,9 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) } if (bp->pil != 0) { if (tlb_type == hypervisor) { - unsigned int irq = __irq(bp); + unsigned int ino = __irq_ino(bp); - sun4v_intr_setstate(irq, HV_INTR_STATE_IDLE); + sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); } else { upa_writel(ICLR_IDLE, bp->iclr); /* Test and add entropy */ @@ -791,10 +794,10 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) } if (tlb_type == hypervisor) { - unsigned int irq = __irq(bucket); + unsigned int ino = __irq_ino(bucket); - sun4v_intr_settarget(irq, goal_cpu); - sun4v_intr_setenabled(irq, HV_INTR_ENABLED); + sun4v_intr_settarget(ino, goal_cpu); + sun4v_intr_setenabled(ino, HV_INTR_ENABLED); } else { unsigned int tid; -- cgit v1.2.3 From c4bea2883974a59ab7a0ac6c01d34f7ae0e8cd8e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 22:56:27 -0800 Subject: [SPARC64]: Make error codes available from sun4v_intr_get*(). And check for errors at call sites. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/irq.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c57b1708ae8..0d3b0ea329c 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -154,9 +154,16 @@ void enable_irq(unsigned int irq) if (tlb_type == hypervisor) { unsigned int ino = __irq_ino(irq); int cpu = hard_smp_processor_id(); + int err; - sun4v_intr_settarget(ino, cpu); + err = sun4v_intr_settarget(ino, cpu); + if (err != HV_EOK) + printk("sun4v_intr_settarget(%x,%d): err(%d)\n", + ino, cpu, err); sun4v_intr_setenabled(ino, HV_INTR_ENABLED); + if (err != HV_EOK) + printk("sun4v_intr_setenabled(%x): err(%d)\n", + ino, err); } else { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; @@ -216,8 +223,12 @@ void disable_irq(unsigned int irq) if (imap != 0UL) { if (tlb_type == hypervisor) { unsigned int ino = __irq_ino(irq); + int err; - sun4v_intr_setenabled(ino, HV_INTR_DISABLED); + err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); + if (err != HV_EOK) + printk("sun4v_intr_setenabled(%x): " + "err(%d)\n", ino, err); } else { u32 tmp; @@ -647,8 +658,12 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) if (bp->pil != 0) { if (tlb_type == hypervisor) { unsigned int ino = __irq_ino(bp); + int err; - sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); + err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk("sun4v_intr_setstate(%x): " + "err(%d)\n", ino, err); } else { upa_writel(ICLR_IDLE, bp->iclr); /* Test and add entropy */ -- cgit v1.2.3 From 3af6e01e9acfb786c5dd2862f57f206b0b3cb889 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 00:55:49 -0800 Subject: [SPARC64]: arch/sparc64/kernel/trampoline.S needs asm/cpudata.h Signed-off-by: David S. Miller --- arch/sparc64/kernel/trampoline.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 22fb24eac99..b9c9f54b0a0 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -17,6 +17,7 @@ #include #include #include +#include .data .align 8 -- cgit v1.2.3 From 4a07e646c55be118442e464b7a2a9682c7131d9a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 13:49:32 -0800 Subject: [SPARC64]: Kill sun4v_register_fault_status() on SMP. That now gets done as a side effect of taking over the trap table from OBP. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 2dbe008d6b7..c280e6742b2 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -122,10 +122,8 @@ void __init smp_callin(void) __local_per_cpu_offset = __per_cpu_offset(cpuid); - if (tlb_type == hypervisor) { - sun4v_register_fault_status(); + if (tlb_type == hypervisor) sun4v_ktsb_register(); - } __flush_tlb_all(); -- cgit v1.2.3 From e7093703d912254b5667dfe851a6d0f7e664f9df Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 14:12:44 -0800 Subject: [SPARC64]: INO is never fully specified already on SUN4V. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_common.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 58310aacea2..1b1f89b35f6 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -703,16 +703,18 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev) return; } - /* Fully specified already? */ - if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { - pdev->irq = p->irq_build(pbm, pdev, prom_irq); - goto have_irq; - } + if (tlb_type != hypervisor) { + /* Fully specified already? */ + if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) { + pdev->irq = p->irq_build(pbm, pdev, prom_irq); + goto have_irq; + } - /* An onboard device? (bit 5 set) */ - if ((prom_irq & PCI_IRQ_INO) & 0x20) { - pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); - goto have_irq; + /* An onboard device? (bit 5 set) */ + if ((prom_irq & PCI_IRQ_INO) & 0x20) { + pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq)); + goto have_irq; + } } /* Can we find a matching entry in the interrupt-map? */ -- cgit v1.2.3 From fbf1c68eaf8a945c8617e355d8d6bd2fa09109d2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 16:37:13 -0800 Subject: [SPARC64]: Don't printk() any messaages in sun4v_build_irq(). It just clutters up the log. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 0d3b0ea329c..72e626b54ba 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -333,9 +333,6 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsign sysino = sun4v_devino_to_sysino(devhandle, devino); - printk(KERN_INFO "sun4v_irq: Mapping (%x:%x) --> sysino[%lx]\n", - devhandle, devino, sysino); - bucket = &ivector_table[sysino]; /* Catch accidental accesses to these things. IMAP/ICLR handling -- cgit v1.2.3 From 9f8a5b843fc47ea150525f912574677483e1a5ac Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 16:39:22 -0800 Subject: [SPARC64]: Fix C-function name called by sun4v_mna trap code. The trap code was calling itself :-) Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 2 +- arch/sparc64/kernel/traps.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 950ca74b4a5..b3844ee3844 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -265,7 +265,7 @@ sun4v_mna: rd %pc, %g7 mov %l4, %o1 mov %l5, %o2 - call sun4v_mna + call sun4v_do_mna add %sp, PTREGS_OFF, %o0 ba,a,pt %xcc, rtrap_clr_l6 diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 1e9a4b6b1fe..bedb2f693c7 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2250,7 +2250,7 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo force_sig_info(SIGBUS, &info, current); } -void sun4v_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) +void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) { siginfo_t info; -- cgit v1.2.3 From 987b6de7102cf2f583733efd726ae920a1335519 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Tue, 14 Feb 2006 16:42:11 -0800 Subject: [SPARC64]: Restrict PCI bus scanning on SUN4V. On the PBM's first bus number, only allow device 0, function 0, to be poked at with PCI config space accesses. For some reason, this single device responds to all device numbers. Also, reduce the verbiage of the debugging log printk's for PCI cfg space accesses in the SUN4V PCI controller driver, so that it doesn't overwhelm the slow SUN4V hypervisor console. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index ac311d3dbc5..ea51ade43b8 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -517,8 +517,14 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = { /* SUN4V PCI configuration space accessors. */ -static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus) +static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func) { + if (bus == pbm->pci_first_busno) { + if (device == 0 && func == 0) + return 0; + return 1; + } + if (bus < pbm->pci_first_busno || bus > pbm->pci_last_busno) return 1; @@ -535,15 +541,14 @@ static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int func = PCI_FUNC(devfn); unsigned long ret; - if (pci_sun4v_out_of_range(pbm, bus)) { + if (pci_sun4v_out_of_range(pbm, bus, device, func)) { ret = ~0UL; } else { ret = pci_sun4v_config_get(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size); #if 0 - printk("read_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] " - "== [%016lx]\n", + printk("rcfg: [%x:%x:%x:%d]=[%lx]\n", devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, ret); #endif @@ -574,15 +579,14 @@ static int pci_sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int func = PCI_FUNC(devfn); unsigned long ret; - if (pci_sun4v_out_of_range(pbm, bus)) { + if (pci_sun4v_out_of_range(pbm, bus, device, func)) { /* Do nothing. */ } else { ret = pci_sun4v_config_put(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value); #if 0 - printk("write_pci_cfg: devh[%x] device[%08x] where[%x] sz[%d] " - "val[%08x] == [%016lx]\n", + printk("wcfg: [%x:%x:%x:%d] v[%x] == [%lx]\n", devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value, ret); #endif @@ -610,16 +614,13 @@ static void pbm_scan_bus(struct pci_controller_info *p, memset(cookie, 0, sizeof(*cookie)); cookie->pbm = pbm; - pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, - p->pci_ops, - pbm); + pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, p->pci_ops, pbm); #if 0 pci_fixup_host_bridge_self(pbm->pci_bus); pbm->pci_bus->self->sysdata = cookie; #endif - pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, - prom_getchild(pbm->prom_node)); + pbm->prom_node); pci_record_assignments(pbm, pbm->pci_bus); pci_assign_unassigned(pbm, pbm->pci_bus); pci_fixup_irq(pbm, pbm->pci_bus); @@ -884,19 +885,12 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) probe_existing_entries(pbm, iommu); } -/* Don't get this from the root nexus, get it from the "pci@0" node below. 
*/ static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) { unsigned int busrange[2]; int prom_node = pbm->prom_node; int err; - prom_node = prom_getchild(prom_node); - if (prom_node == 0) { - prom_printf("%s: Fatal error, no child OBP node.\n", pbm->name); - prom_halt(); - } - err = prom_getproperty(prom_node, "bus-range", (char *)&busrange[0], sizeof(busrange)); @@ -929,7 +923,9 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 sprintf(pbm->name, "SUN4V-PCI%d PBM%c", p->index, (pbm == &p->pbm_A ? 'A' : 'B')); - printk("%s: devhandle[%x]\n", pbm->name, pbm->devhandle); + printk("%s: devhandle[%x] prom_node[%x:%x]\n", + pbm->name, pbm->devhandle, + pbm->prom_node, prom_getchild(pbm->prom_node)); prom_getstring(prom_node, "name", pbm->prom_name, sizeof(pbm->prom_name)); -- cgit v1.2.3 From 355db99860a1d77d77cd2fc66e2f5ab58f3e0995 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 16:44:39 -0800 Subject: [SPARC64]: Explicitly init *nregs to 0 in find_device_prom_node(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 1b1f89b35f6..b2d21b11a23 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -39,6 +39,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, { int node; + *nregs = 0; + /* * Return the PBM's PROM node in case we are it's PCI device, * as the PBM's reg property is different to standard PCI reg @@ -51,10 +53,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm, pdev->device == PCI_DEVICE_ID_SUN_SCHIZO || pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO || pdev->device == PCI_DEVICE_ID_SUN_SABRE || - pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) { - *nregs = 0; + pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) return bus_prom_node; - } node = prom_getchild(bus_prom_node); while (node != 0) { -- cgit v1.2.3 From abd92b2d21899d42a60c3c1ac995768c76774608 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 22:20:13 -0800 Subject: [SPARC64]: Fix sun4v_intr_setenabled() return value check in enable_irq(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 72e626b54ba..c786d2549bc 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -160,7 +160,7 @@ void enable_irq(unsigned int irq) if (err != HV_EOK) printk("sun4v_intr_settarget(%x,%d): err(%d)\n", ino, cpu, err); - sun4v_intr_setenabled(ino, HV_INTR_ENABLED); + err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); if (err != HV_EOK) printk("sun4v_intr_setenabled(%x): err(%d)\n", ino, err); -- cgit v1.2.3 From 329c68b21896eea371edbfdf305c459fb74cf9a8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Feb 2006 22:20:41 -0800 Subject: [SPARC64]: Make lack of interrupt-map-* a fatal error on SUN4V. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_sun4v.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index ea51ade43b8..13b611db058 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -954,19 +954,20 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32 err = prom_getproperty(prom_node, "interrupt-map", (char *)pbm->pbm_intmap, sizeof(pbm->pbm_intmap)); - if (err != -1) { - pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap)); - err = prom_getproperty(prom_node, "interrupt-map-mask", - (char *)&pbm->pbm_intmask, - sizeof(pbm->pbm_intmask)); - if (err == -1) { - prom_printf("%s: Fatal error, no " - "interrupt-map-mask.\n", pbm->name); - prom_halt(); - } - } else { - pbm->num_pbm_intmap = 0; - memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask)); + if (err == 0 || err == -1) { + prom_printf("%s: Fatal error, no interrupt-map property.\n", + pbm->name); + prom_halt(); + } + + pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap)); + err = prom_getproperty(prom_node, "interrupt-map-mask", + (char *)&pbm->pbm_intmask, + sizeof(pbm->pbm_intmask)); + if (err == 0 || err == -1) { + prom_printf("%s: Fatal error, no interrupt-map-mask.\n", + pbm->name); + prom_halt(); } pci_sun4v_get_bus_range(pbm); -- cgit v1.2.3 From f03b8a546868fcf43feb455b69b152eb867606b2 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 00:35:50 -0800 Subject: [SPARC64]: Use different cache sizing defaults on SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/kernel/devices.c | 34 +++++++++++++++++++++++++++------- arch/sparc64/kernel/smp.c | 28 +++++++++++++++++++++------- 2 files changed, 48 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index 817132826e0..71eee392e14 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -200,7 +200,8 @@ void __init device_scan(void) #ifndef CONFIG_SMP { - int err, cpu_node; + int err, cpu_node, def; + err = cpu_find_by_instance(0, &cpu_node, NULL); if (err) { prom_printf("No cpu nodes, cannot continue\n"); @@ -209,21 +210,40 @@ void __init device_scan(void) cpu_data(0).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); + + def = ((tlb_type == hypervisor) ? + (8 * 1024) : + (16 * 1024)); cpu_data(0).dcache_size = prom_getintdefault(cpu_node, "dcache-size", - 16 * 1024); + def); + + def = 32; cpu_data(0).dcache_line_size = - prom_getintdefault(cpu_node, "dcache-line-size", 32); + prom_getintdefault(cpu_node, "dcache-line-size", + def); + + def = 16 * 1024; cpu_data(0).icache_size = prom_getintdefault(cpu_node, "icache-size", - 16 * 1024); + def); + + def = 32; cpu_data(0).icache_line_size = - prom_getintdefault(cpu_node, "icache-line-size", 32); + prom_getintdefault(cpu_node, "icache-line-size", + def); + + def = ((tlb_type == hypervisor) ? 
+ (3 * 1024 * 1024) : + (4 * 1024 * 1024)); cpu_data(0).ecache_size = prom_getintdefault(cpu_node, "ecache-size", - 4 * 1024 * 1024); + def); + + def = 64; cpu_data(0).ecache_line_size = - prom_getintdefault(cpu_node, "ecache-line-size", 64); + prom_getintdefault(cpu_node, "ecache-line-size", + def); printk("CPU[0]: Caches " "D[sz(%d):line_sz(%d)] " "I[sz(%d):line_sz(%d)] " diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index c280e6742b2..64046d37bbf 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -78,7 +78,7 @@ void smp_bogo(struct seq_file *m) void __init smp_store_cpu_info(int id) { - int cpu_node; + int cpu_node, def; /* multiplier and counter set by smp_setup_percpu_timer() */ @@ -90,18 +90,32 @@ void __init smp_store_cpu_info(int id) cpu_data(id).idle_volume = 1; + def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", - 16 * 1024); + def); + + def = 32; cpu_data(id).dcache_line_size = - prom_getintdefault(cpu_node, "dcache-line-size", 32); + prom_getintdefault(cpu_node, "dcache-line-size", def); + + def = 16 * 1024; cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", - 16 * 1024); + def); + + def = 32; cpu_data(id).icache_line_size = - prom_getintdefault(cpu_node, "icache-line-size", 32); + prom_getintdefault(cpu_node, "icache-line-size", def); + + def = ((tlb_type == hypervisor) ? + (3 * 1024 * 1024) : + (4 * 1024 * 1024)); cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", - 4 * 1024 * 1024); + def); + + def = 64; cpu_data(id).ecache_line_size = - prom_getintdefault(cpu_node, "ecache-line-size", 64); + prom_getintdefault(cpu_node, "ecache-line-size", def); + printk("CPU[%d]: Caches " "D[sz(%d):line_sz(%d)] " "I[sz(%d):line_sz(%d)] " -- cgit v1.2.3 From ab66a50e31deb48b0444c248e67e5aa3217efda5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 01:18:19 -0800 Subject: [SPARC64]: Two IRQ handling fixes. On SUN4V, force IRQ state to idle in enable_irq(). However, I'm still not sure this is %100 correct. Call add_interrupt_randomness() on SUN4V too. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c786d2549bc..4d9931d124a 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -164,6 +164,10 @@ void enable_irq(unsigned int irq) if (err != HV_EOK) printk("sun4v_intr_setenabled(%x): err(%d)\n", ino, err); + err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); + if (err != HV_EOK) + printk("sun4v_intr_setstate(%x): " + "err(%d)\n", ino, err); } else { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; @@ -663,10 +667,11 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs) "err(%d)\n", ino, err); } else { upa_writel(ICLR_IDLE, bp->iclr); - /* Test and add entropy */ - if (random & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); } + + /* Test and add entropy */ + if (random & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); } out: bp->flags &= ~IBF_INPROGRESS; -- cgit v1.2.3 From 63c2a0e598c2fa769a08a6e9ad124bf270b4436e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 01:19:56 -0800 Subject: [SPARC64]: Fix pci_intmap_match(). When crawling up the PCI bus chain, stop at the first node that has an interrupt-map property before we hit the root. 
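In outline, the corrected crawl is one loop whose continuation test checks three things: the bus has a parent, it is not the PBM's first bus, and it does not carry its own interrupt-map property. The toy below reduces the kernel's pcidev_cookie and prom_getproplen() plumbing to a flag on an invented struct bus; find_intmap_owner() is a stand-in name, not kernel code.

#include <stdio.h>

/* Fake bus node; has_interrupt_map stands in for a positive
 * prom_getproplen(node, "interrupt-map") result.
 */
struct bus {
	const char *name;
	struct bus *parent;
	int has_interrupt_map;
	int is_pbm_first_bus;
};

/* Walk from the device's bus toward the root, stopping early at the
 * first bridge that carries its own interrupt-map.
 */
static struct bus *find_intmap_owner(struct bus *b)
{
	while (b->parent && !b->is_pbm_first_bus && !b->has_interrupt_map)
		b = b->parent;
	return b;
}

int main(void)
{
	struct bus pbm    = { "pbm",    NULL,    1, 1 };
	struct bus bridge = { "bridge", &pbm,    1, 0 };
	struct bus leaf   = { "leaf",   &bridge, 0, 0 };

	/* Stops at "bridge", the first node with an interrupt-map. */
	printf("stops at: %s\n", find_intmap_owner(&leaf)->name);
	return 0;
}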
Also, if we use a bus interrupt-{map,mask} do not forget to update the 'intmask' pointer as we do for the 'intmap' pointer. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_common.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index b2d21b11a23..ab6a2e1b76f 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -581,18 +581,23 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt int plen; bus_dev = pdev->bus->self; + bus_pcp = bus_dev->sysdata; regs_dev = pdev; + regs_pcp = regs_dev->sysdata; while (bus_dev->bus && - bus_dev->bus->number != pbm->pci_first_busno) { + bus_dev->bus->number != pbm->pci_first_busno && + prom_getproplen(bus_pcp->prom_node, + "interrupt-map") <= 0) { regs_dev = bus_dev; + regs_pcp = regs_dev->sysdata; + bus_dev = bus_dev->bus->self; + bus_pcp = bus_dev->sysdata; } - regs_pcp = regs_dev->sysdata; pregs = regs_pcp->prom_regs; - bus_pcp = bus_dev->sysdata; /* But if the PCI bridge has it's own interrupt map * and mask properties, use that and the regs of the @@ -616,6 +621,8 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt return 0; } + intmask = &bridge_local_intmask; + if (pdev->bus->self != bus_dev) map_slot = 1; } else { -- cgit v1.2.3 From 7890f794e0e6f7dce2a5f4a03ba64b0b3fe306bd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 02:26:54 -0800 Subject: [SPARC64]: Add prom_{start,stop}cpu_cpuid(). Use prom_startcpu_cpuid() on SUN4V instead of prom_startcpu(). We should really test for "SUNW,start-cpu-by-cpuid" presence and use it if present even on SUN4U. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/smp.c | 12 +++++++++--- arch/sparc64/prom/misc.c | 16 ++++++++++++++-- 2 files changed, 23 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 64046d37bbf..527dfd7ae21 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -331,15 +331,21 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) unsigned long cookie = (unsigned long)(&cpu_new_thread); struct task_struct *p; - int timeout, ret, cpu_node; + int timeout, ret; p = fork_idle(cpu); callin_flag = 0; cpu_new_thread = task_thread_info(p); cpu_set(cpu, cpu_callout_map); - cpu_find_by_mid(cpu, &cpu_node); - prom_startcpu(cpu_node, entry, cookie); + if (tlb_type == hypervisor) { + prom_startcpu_cpuid(cpu, entry, cookie); + } else { + int cpu_node; + + cpu_find_by_mid(cpu, &cpu_node); + prom_startcpu(cpu_node, entry, cookie); + } for (timeout = 0; timeout < 5000000; timeout++) { if (callin_flag) diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 713cbac5f9b..36d2b9c1622 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c @@ -308,9 +308,21 @@ int prom_wakeupsystem(void) } #ifdef CONFIG_SMP -void prom_startcpu(int cpunode, unsigned long pc, unsigned long o0) +void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) { - p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0); + p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg); +} + +void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) +{ + p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0), + cpuid, pc, arg); +} + +void prom_stopcpu_cpuid(int cpuid) +{ + p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0), + cpuid); } void prom_stopself(void) -- cgit v1.2.3 From 9d29a3fafd06534ad73427fee3c968c094d05b9b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 19:48:54 -0800 Subject: [SPARC64]: Decode virtual-devices interrupts correctly. Need to translate through the interrupt-map{,-mask} properties. Signed-off-by: David S.
Miller --- arch/sparc64/kernel/devices.c | 98 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 86 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index 71eee392e14..1341b99ca7a 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Used to synchronize acceses to NatSemi SUPER I/O chip configure * operations in asm/ns87303.h @@ -33,14 +34,28 @@ extern void central_probe(void); u32 sun4v_vdev_devhandle; int sun4v_vdev_root; -struct linux_prom_pci_intmap *sun4v_vdev_intmap; -int sun4v_vdev_num_intmap; -struct linux_prom_pci_intmap sun4v_vdev_intmask; + +struct vdev_intmap { + unsigned int phys; + unsigned int irq; + unsigned int cnode; + unsigned int cinterrupt; +}; + +struct vdev_intmask { + unsigned int phys; + unsigned int interrupt; + unsigned int __unused; +}; + +static struct vdev_intmap *vdev_intmap; +static int vdev_num_intmap; +static struct vdev_intmask vdev_intmask; static void __init sun4v_virtual_device_probe(void) { struct linux_prom64_registers regs; - struct linux_prom_pci_intmap *ip; + struct vdev_intmap *ip; int node, sz, err; if (tlb_type != hypervisor) @@ -58,10 +73,21 @@ static void __init sun4v_virtual_device_probe(void) prom_getproperty(node, "reg", (char *)®s, sizeof(regs)); sun4v_vdev_devhandle = (regs.phys_addr >> 32UL) & 0x0fffffff; - sz = sizeof(*ip) * 64; - sun4v_vdev_intmap = ip = alloc_bootmem_low_pages(sz); - if (!sun4v_vdev_intmap) { - prom_printf("SUN4V: Error, cannot allocate vdev intmap.\n"); + sz = prom_getproplen(node, "interrupt-map"); + if (sz <= 0) { + prom_printf("SUN4V: Error, no vdev interrupt-map.\n"); + prom_halt(); + } + + if ((sz % sizeof(*ip)) != 0) { + prom_printf("SUN4V: Bogus interrupt-map property size %d\n", + sz); + prom_halt(); + } + + vdev_intmap = ip = alloc_bootmem_low_pages(sz); + if (!vdev_intmap) { + prom_printf("SUN4V: Error, cannot allocate vdev_intmap.\n"); prom_halt(); } @@ -70,22 +96,70 @@ static void __init sun4v_virtual_device_probe(void) prom_printf("SUN4V: Fatal error, no vdev interrupt-map.\n"); prom_halt(); } + if (err != sz) { + prom_printf("SUN4V: Inconsistent interrupt-map size, " + "proplen(%d) vs getprop(%d).\n", sz,err); + prom_halt(); + } - sun4v_vdev_num_intmap = err / sizeof(*ip); + vdev_num_intmap = err / sizeof(*ip); err = prom_getproperty(node, "interrupt-map-mask", - (char *) &sun4v_vdev_intmask, - sizeof(sun4v_vdev_intmask)); - if (err == -1) { + (char *) &vdev_intmask, + sizeof(vdev_intmask)); + if (err <= 0) { prom_printf("SUN4V: Fatal error, no vdev " "interrupt-map-mask.\n"); prom_halt(); } + if (err % sizeof(vdev_intmask)) { + prom_printf("SUN4V: Bogus interrupt-map-mask " + "property size %d\n", err); + prom_halt(); + } printk("SUN4V: virtual-devices devhandle[%x]\n", sun4v_vdev_devhandle); } +unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node) +{ + unsigned int irq, reg; + int err, i; + + err = prom_getproperty(dev_node, "interrupts", + (char *) &irq, sizeof(irq)); + if (err <= 0) { + printk("VDEV: Cannot get \"interrupts\" " + "property for OBP node %x\n", dev_node); + return 0; + } + + err = prom_getproperty(dev_node, "reg", + (char *) ®, sizeof(reg)); + if (err <= 0) { + printk("VDEV: Cannot get \"reg\" " + "property for OBP node %x\n", dev_node); + return 0; + } + + for (i = 0; i < vdev_num_intmap; i++) { + if (vdev_intmap[i].phys == (reg & vdev_intmask.phys) && + vdev_intmap[i].irq == (irq & vdev_intmask.interrupt)) 
{ + irq = vdev_intmap[i].cinterrupt; + break; + } + } + + if (i == vdev_num_intmap) { + printk("VDEV: No matching interrupt map entry " + "for OBP node %x\n", dev_node); + return 0; + } + + return sun4v_build_irq(sun4v_vdev_devhandle, irq, 4, 0); +} + static const char *cpu_mid_prop(void) { if (tlb_type == spitfire) -- cgit v1.2.3 From 1daef08a12157923d90ec7a47ead8a97e0d243cc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 20:35:10 -0800 Subject: [SPARC64]: Fix comment typo in __flush_tlb_kernel_range. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 8c244932b1c..725f8b34af4 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -86,7 +86,7 @@ __flush_tlb_pending: /* 26 insns */ .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* 14 insns */ +__flush_tlb_kernel_range: /* 16 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f -- cgit v1.2.3 From de635d833f61ce0f2ad0b3431e6a3323a1c4fed5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 21:01:31 -0800 Subject: [SPARC64]: Fix flush_tsb_user() on SUN4V. Needs to use physical addressing just like cheetah_plus. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 975242ab88e..3c1ff05038b 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -58,7 +58,7 @@ void flush_tsb_user(struct mmu_gather *mp) ctx = CTX_HWBITS(mm->context); - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else base = (unsigned long) tsb; -- cgit v1.2.3 From a7b31bac691668a60da8b9892124b7da408e0a0e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 21:16:42 -0800 Subject: [SPARC64]: Do not write garbage into %pstate in tsb_context_switch(). For SUN4V, we were clobbering %o5 to do the hypervisor call. This clobbers the saved %pstate value and we end up writing garbage into that register as a result. Oops. Signed-off-by: David S. Miller --- arch/sparc64/kernel/tsb.S | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index be8f0892d72..7996c9d6670 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -265,13 +265,19 @@ __tsb_context_switch: mov SCRATCHPAD_UTSBREG2, %g1 stxa %g2, [%g1] ASI_SCRATCHPAD + /* Save away %o5's %pstate, we have to use %o5 for + * the hypervisor call. + */ + mov %o5, %g1 + mov HV_FAST_MMU_TSB_CTXNON0, %o5 mov 1, %o0 mov %o4, %o1 ta HV_FAST_TRAP + /* Finish up and restore %o5. */ ba,pt %xcc, 9f - nop + mov %g1, %o5 /* SUN4U TSB switch. */ 1: mov TSB_REG, %g1 -- cgit v1.2.3 From c7f81d42d3d07115a7b92e36ade0f3167f75bc55 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 21:21:17 -0800 Subject: [SPARC64]: Don't use ASI_QUAD_LDD_PHYS on SUN4V. Need to use ASI_QUAD_LDD_PHYS_4V instead. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index b3844ee3844..df65d712dcc 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -57,7 +57,7 @@ sun4v_itlb_miss: COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ - ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 + ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 cmp %g2, %g6 sethi %hi(PAGE_EXEC), %g7 ldx [%g7 + %lo(PAGE_EXEC)], %g7 @@ -104,7 +104,7 @@ sun4v_dtlb_miss: COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ - ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 + ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 cmp %g2, %g6 bne,a,pn %xcc, tsb_miss_page_table_walk mov FAULT_CODE_ITLB, %g3 -- cgit v1.2.3 From e7a0453ef82c1433a35ab82d874296fff68f3639 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 22:25:27 -0800 Subject: [SPARC64] PCI: Size TSB correctly on SUN4V. Forgot to multiply by 8 * 1024, oops. Correct the size constant when the virtual-dma arena is 2GB in size, it should be 256, not 128. Finally, log some info about the TSB at probe time. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 13b611db058..902d07c714f 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -102,6 +102,7 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr first_page = __get_free_pages(GFP_ATOMIC, order); if (first_page == 0UL) return NULL; + memset((char *)first_page, 0, PAGE_SIZE << order); pcp = pdev->sysdata; @@ -805,11 +806,11 @@ static void pbm_register_toplevel_resources(struct pci_controller_info *p, &pbm->mem_space); } -static void probe_existing_entries(struct pci_pbm_info *pbm, - struct pci_iommu *iommu) +static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, + struct pci_iommu *iommu) { struct pci_iommu_arena *arena = &iommu->arena; - unsigned long i; + unsigned long i, cnt = 0; u32 devhandle; devhandle = pbm->devhandle; @@ -819,9 +820,13 @@ static void probe_existing_entries(struct pci_pbm_info *pbm, ret = pci_sun4v_iommu_getmap(devhandle, HV_PCI_TSBID(0, i), &io_attrs, &ra); - if (ret == HV_EOK) + if (ret == HV_EOK) { + cnt++; __set_bit(i, arena->map); + } } + + return cnt; } static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) @@ -853,7 +858,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) case 0x80000000: dma_mask |= 0x7fffffff; - tsbsize = 128; + tsbsize = 256; break; default: @@ -861,6 +866,8 @@ prom_halt(); }; + tsbsize *= (8 * 1024); + num_tsb_entries = tsbsize / sizeof(iopte_t); dma_offset = vdma[0]; @@ -882,7 +889,10 @@ memset(iommu->arena.map, 0, sz); iommu->arena.limit = num_tsb_entries; - probe_existing_entries(pbm, iommu); + sz = probe_existing_entries(pbm, iommu); + + printk("%s: TSB entries [%lu], existing mapings [%lu]\n", + pbm->name, num_tsb_entries, sz); } static void pci_sun4v_get_bus_range(struct pci_pbm_info *pbm) -- cgit v1.2.3 From 3d6395cb770b0db9135a853b1742418c99ed2148 Mon Sep 17 00:00:00 2001 From: "David S.
Miller" Date: Thu, 16 Feb 2006 01:41:41 -0800 Subject: [SPARC64]: Fix tl1 trap state capture/dump on SUN4V. No trap levels above 2 in privileged mode on SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/kernel/etrap.S | 6 ++++++ arch/sparc64/kernel/traps.c | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index a0e7d480e5d..149383835c2 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -188,6 +188,11 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. rdpr %tt, %g3 stx %g3, [%g2 + STACK_BIAS + 0x38] + sethi %hi(is_sun4v), %g3 + lduw [%g3 + %lo(is_sun4v)], %g3 + brnz,pn %g3, finish_tl1_capture + nop + wrpr %g0, 3, %tl rdpr %tstate, %g3 stx %g3, [%g2 + STACK_BIAS + 0x40] @@ -210,6 +215,7 @@ etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself. stx %g1, [%g2 + STACK_BIAS + 0x80] +finish_tl1_capture: wrpr %g0, 1, %tl 661: nop .section .sun4v_1insn_patch, "ax" diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index bedb2f693c7..5956d0a9400 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -73,10 +73,12 @@ struct tl1_traplog { static void dump_tl1_traplog(struct tl1_traplog *p) { - int i; + int i, limit; printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n", p->tl); + + limit = (tlb_type == hypervisor) ? 2 : 4; for (i = 0; i < 4; i++) { printk(KERN_CRIT "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " -- cgit v1.2.3 From 9b6b46470cc1c52f6917b0cd8b7cf4b5cbc5acf6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 01:45:49 -0800 Subject: [SPARC64]: Fix bogus call to sun4v_mna in winfixup code. The C function is named sun4v_do_mna not sun4v_mna. Signed-off-by: David S. Miller --- arch/sparc64/kernel/winfixup.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index aca2a98b930..161371370e9 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -121,7 +121,7 @@ fill_fixup_mna: cmp %g1, 3 bne,pt %icc, 1f add %sp, PTREGS_OFF, %o0 - call sun4v_mna + call sun4v_do_mna nop ba,a,pt %xcc, rtrap_clr_l6 1: call mem_address_unaligned -- cgit v1.2.3 From 8e42550c683b2ad4869fc4fa438204841fd9b7cc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 02:18:49 -0800 Subject: [SPARC64]: do_fptrap needs to load the thread reg into %g6. Signed-off-by: David S. Miller --- arch/sparc64/kernel/entry.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index a2842a72f8e..bf40b065bcc 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -356,6 +356,7 @@ fitos_emul_fini: .globl do_fptrap .align 32 do_fptrap: + TRAP_LOAD_THREAD_REG(%g6, %g1) stx %fsr, [%g6 + TI_XFSR] do_fptrap_after_fsr: ldub [%g6 + TI_FPSAVED], %g3 -- cgit v1.2.3 From 94f8762db9a80ed34252e9fe5fa38be87bb7826b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 14:26:53 -0800 Subject: [SPARC64]: Add sun4v_cpu_qconf() hypervisor call. Call it from register_one_mondo(). Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/entry.S | 12 ++++++++++++ arch/sparc64/kernel/irq.c | 26 +++++++------------------- 2 files changed, 19 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index bf40b065bcc..f5c8a293979 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1772,3 +1772,15 @@ sun4v_intr_settarget: retl nop + /* %o0: type + * %o1: queue paddr + * %o2: num queue entries + * + * returns %o0: status + */ + .globl sun4v_cpu_qconf +sun4v_cpu_qconf: + mov HV_FAST_CPU_QCONF, %o5 + ta HV_FAST_TRAP + retl + nop diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 4d9931d124a..e1729e5189a 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -939,25 +939,13 @@ void init_irqwork_curcpu(void) static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) { - register unsigned long func __asm__("%o5"); - register unsigned long arg0 __asm__("%o0"); - register unsigned long arg1 __asm__("%o1"); - register unsigned long arg2 __asm__("%o2"); - - func = HV_FAST_CPU_QCONF; - arg0 = type; - arg1 = paddr; - arg2 = 128; /* XXX Implied by Niagara queue offsets. XXX */ - __asm__ __volatile__("ta %8" - : "=&r" (func), "=&r" (arg0), - "=&r" (arg1), "=&r" (arg2) - : "0" (func), "1" (arg0), - "2" (arg1), "3" (arg2), - "i" (HV_FAST_TRAP)); - - if (arg0 != HV_EOK) { - prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n", - type, func); + unsigned long num_entries = 128; + unsigned long status; + + status = sun4v_cpu_qconf(type, paddr, num_entries); + if (status != HV_EOK) { + prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, " + "err %lu\n", type, paddr, num_entries, status); prom_halt(); } } -- cgit v1.2.3 From 22780e23c629303474797d17e7f09ad7721ef55b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 14:37:05 -0800 Subject: [SPARC64]: Set dummy bucket->{imap,iclr} unique on SUN4V. So that free_irq() disables the IRQ correctly. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index e1729e5189a..580b4de8b7c 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -143,7 +143,6 @@ void enable_irq(unsigned int irq) { struct ino_bucket *bucket = __bucket(irq); unsigned long imap; - unsigned long tid; imap = bucket->imap; if (imap == 0UL) @@ -169,6 +168,8 @@ void enable_irq(unsigned int irq) printk("sun4v_intr_setstate(%x): " "err(%d)\n", ino, err); } else { + unsigned long tid; + if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; @@ -342,9 +343,12 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsign /* Catch accidental accesses to these things. IMAP/ICLR handling * is done by hypervisor calls on sun4v platforms, not by direct * register accesses. + * + * But we need to make them look unique for the disable_irq() logic + * in free_irq().
*/ - bucket->imap = ~0UL; - bucket->iclr = ~0UL; + bucket->imap = ~0UL - sysino; + bucket->iclr = ~0UL - sysino; bucket->pil = pil; bucket->flags = flags; @@ -547,7 +551,6 @@ void free_irq(unsigned int irq, void *dev_id) bucket = __bucket(irq); if (bucket != &pil0_dummy_bucket) { struct irq_desc *desc = bucket->irq_info; - unsigned long imap = bucket->imap; int ent, i; for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) { @@ -560,6 +563,8 @@ void free_irq(unsigned int irq, void *dev_id) } if (!desc->action_active_mask) { + unsigned long imap = bucket->imap; + /* This unique interrupt source is now inactive. */ bucket->flags &= ~IBF_ACTIVE; @@ -803,7 +808,6 @@ EXPORT_SYMBOL(probe_irq_off); static int retarget_one_irq(struct irqaction *p, int goal_cpu) { struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; - unsigned long imap = bucket->imap; while (!cpu_online(goal_cpu)) { if (++goal_cpu >= NR_CPUS) @@ -816,6 +820,7 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) sun4v_intr_settarget(ino, goal_cpu); sun4v_intr_setenabled(ino, HV_INTR_ENABLED); } else { + unsigned long imap = bucket->imap; unsigned int tid; if (tlb_type == cheetah || tlb_type == cheetah_plus) { -- cgit v1.2.3 From af02bec66294c76fba181c665c68a31fd4392020 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 16:23:45 -0800 Subject: [SPARC64]: Fix return from trap on SUN4V. We need to set the global register set _AND_ disable PSTATE_IE in %pstate. The original patch sequence was leaving PSTATE_IE enabled when returning to kernel mode, oops. This fixes the random register corruption being seen on SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/kernel/rtrap.S | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 551f7198200..1e724fe172a 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -234,8 +234,10 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 /* Normal globals are restored, go to trap globals. */ 661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate - .section .sun4v_1insn_patch, "ax" + nop + .section .sun4v_2insn_patch, "ax" .word 661b + wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate SET_GL(1) .previous -- cgit v1.2.3 From 14f6689cbb3ec2c194bd770fbe0d6e2d90eb6760 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 20:44:25 -0800 Subject: [SPARC64]: Don't set interrupt state to IDLE in enable_irq(). We'll lose events that way. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 580b4de8b7c..6eb44ca5dba 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -163,10 +163,6 @@ void enable_irq(unsigned int irq) if (err != HV_EOK) printk("sun4v_intr_setenabled(%x): err(%d)\n", ino, err); - err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); - if (err != HV_EOK) - printk("sun4v_intr_setstate(%x): " - "err(%d)\n", ino, err); } else { unsigned long tid; -- cgit v1.2.3 From 6154f94f0e1b3984ad2d0bcda586bc8946398b8a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Feb 2006 23:01:10 -0800 Subject: [SPARC64]: Rewrite pci_intmap_match(). The whole algorithm was wrong. 
What we need to do is: 1) Walk each PCI bus above this device on the path to the PCI controller nexus, and for each: a) If interrupt-map exists, apply it, record IRQ controller node b) Else, swivel interrupt number using PCI_SLOT(), use PCI bus parent OBP node as controller node c) Walk up to "controller node" until we hit the first PCI bus in this domain, or "controller node" is the PCI controller OBP node 2) If we walked to PCI controller OBP node, we're done. 3) Else, apply PCI controller interrupt-map to interrupt. There is some stuff that needs to be checked out for ebus and isa, but the PCI part is good to go. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_common.c | 269 ++++++++++++++++++++++----------------- 1 file changed, 155 insertions(+), 114 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index ab6a2e1b76f..f9101966a74 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -541,142 +541,183 @@ void __init pci_assign_unassigned(struct pci_pbm_info *pbm, pci_assign_unassigned(pbm, bus); } -static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) +static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm, + struct pci_dev *toplevel_pdev, + struct pci_dev *pdev, + unsigned int interrupt) { - struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap; - struct linux_prom_pci_intmask bridge_local_intmask, *intmask; - struct pcidev_cookie *dev_pcp = pdev->sysdata; - struct pci_pbm_info *pbm = dev_pcp->pbm; - struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs; - unsigned int hi, mid, lo, irq; - int i, num_intmap, map_slot; + unsigned int ret; - intmap = &pbm->pbm_intmap[0]; - intmask = &pbm->pbm_intmask; - num_intmap = pbm->num_pbm_intmap; - map_slot = 0; + if (unlikely(interrupt < 1 || interrupt > 4)) { + printk("%s: Device %s interrupt value of %u is strange.\n", + pbm->name, pci_name(pdev), interrupt); + return interrupt; + } - /* If we are underneath a PCI bridge, use PROM register - * property of the parent bridge which is closest to - * the PBM. - * - * However if that parent bridge has interrupt map/mask - * properties of its own we use the PROM register property - * of the next child device on the path to PDEV. - * - * In detail the two cases are (note that the 'X' below is the - * 'next child on the path to PDEV' mentioned above): - * - * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV - * - * Here we use regs of 'PCI bus' device. - * - * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV - * - * Here we use regs of 'X'. Note that X can be PDEV. 
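As a concrete check of step 1b's swivel (the pci_slot_swivel() code appears just below), the rotation can be tabulated in isolation. This stand-alone harness is hypothetical; the formula is the one from the patch:

    #include <stdio.h>

    /* Same arithmetic as pci_slot_swivel() below: rotate INT1..INT4
     * by the device's slot number, modulo 4, staying in 1..4.
     */
    static unsigned int swivel(unsigned int intx, unsigned int slot)
    {
        return ((intx - 1 + (slot & 3)) & 3) + 1;
    }

    int main(void)
    {
        unsigned int slot, intx;

        for (slot = 0; slot < 4; slot++)
            for (intx = 1; intx <= 4; intx++)
                printf("slot %u: INT%c -> INT%c\n", slot,
                       'A' + intx - 1, 'A' + swivel(intx, slot) - 1);
        return 0;
    }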
- */ - if (pdev->bus->number != pbm->pci_first_busno) { - struct pcidev_cookie *bus_pcp, *regs_pcp; - struct pci_dev *bus_dev, *regs_dev; - int plen; + ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1; + + printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n", + pbm->name, pci_name(toplevel_pdev), pci_name(pdev), + interrupt, PCI_SLOT(pdev->devfn), ret); - bus_dev = pdev->bus->self; - bus_pcp = bus_dev->sysdata; - regs_dev = pdev; - regs_pcp = regs_dev->sysdata; + return ret; +} - while (bus_dev->bus && - bus_dev->bus->number != pbm->pci_first_busno && - prom_getproplen(bus_pcp->prom_node, - "interrupt-map") <= 0) { - regs_dev = bus_dev; - regs_pcp = regs_dev->sysdata; +static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm, + struct pci_dev *toplevel_pdev, + struct pci_dev *pbus, + struct pci_dev *pdev, + unsigned int interrupt, + unsigned int *cnode) +{ + struct linux_prom_pci_intmap imap[PROM_PCIIMAP_MAX]; + struct linux_prom_pci_intmask imask; + struct pcidev_cookie *pbus_pcp = pbus->sysdata; + struct pcidev_cookie *pdev_pcp = pdev->sysdata; + struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs; + int plen, num_imap, i; + unsigned int hi, mid, lo, irq, orig_interrupt; + + *cnode = pbus_pcp->prom_node; + + plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map", + (char *) &imap[0], sizeof(imap)); + if (plen <= 0 || + (plen % sizeof(struct linux_prom_pci_intmap)) != 0) { + printk("%s: Device %s interrupt-map has bad len %d\n", + pbm->name, pci_name(pbus), plen); + goto no_intmap; + } + num_imap = plen / sizeof(struct linux_prom_pci_intmap); + + plen = prom_getproperty(pbus_pcp->prom_node, "interrupt-map-mask", + (char *) &imask, sizeof(imask)); + if (plen <= 0 || + (plen % sizeof(struct linux_prom_pci_intmask)) != 0) { + printk("%s: Device %s interrupt-map-mask has bad len %d\n", + pbm->name, pci_name(pbus), plen); + goto no_intmap; + } - bus_dev = bus_dev->bus->self; - bus_pcp = bus_dev->sysdata; + orig_interrupt = interrupt; + + hi = pregs->phys_hi & imask.phys_hi; + mid = pregs->phys_mid & imask.phys_mid; + lo = pregs->phys_lo & imask.phys_lo; + irq = interrupt & imask.interrupt; + + for (i = 0; i < num_imap; i++) { + if (imap[i].phys_hi == hi && + imap[i].phys_mid == mid && + imap[i].phys_lo == lo && + imap[i].interrupt == irq) { + *cnode = imap[i].cnode; + interrupt = imap[i].cinterrupt; } + } - pregs = regs_pcp->prom_regs; + printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n", + pbm->name, pci_name(toplevel_pdev), + pci_name(pbus), pci_name(pdev), + orig_interrupt, interrupt); +no_intmap: + return interrupt; +} - /* But if the PCI bridge has it's own interrupt map - * and mask properties, use that and the regs of the - * PCI entity at the next level down on the path to the - * device. - */ - plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map", - (char *) &bridge_local_intmap[0], - sizeof(bridge_local_intmap)); - if (plen != -1) { - intmap = &bridge_local_intmap[0]; - num_intmap = plen / sizeof(struct linux_prom_pci_intmap); - plen = prom_getproperty(bus_pcp->prom_node, - "interrupt-map-mask", - (char *) &bridge_local_intmask, - sizeof(bridge_local_intmask)); - if (plen == -1) { - printk("pci_intmap_match: Warning! Bridge has intmap " - "but no intmask.\n"); - printk("pci_intmap_match: Trying to recover.\n"); - return 0; - } +/* For each PCI bus on the way to the root: + * 1) If it has an interrupt-map property, apply it. + * 2) Else, swivel the interrupt number based upon the PCI device number. + * + * Return the "IRQ controller" node. 
If this is the PBM's device node, + * all interrupt translations are complete, else we should use that node's + * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt. + */ +static unsigned int __init pci_intmap_match_to_root(struct pci_pbm_info *pbm, + struct pci_dev *pdev, + unsigned int *interrupt) +{ + struct pci_dev *toplevel_pdev = pdev; + struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata; + unsigned int cnode = toplevel_pcp->prom_node; - intmask = &bridge_local_intmask; + while (pdev->bus->number != pbm->pci_first_busno) { + struct pci_dev *pbus = pdev->bus->self; + struct pcidev_cookie *pcp = pbus->sysdata; + int plen; - if (pdev->bus->self != bus_dev) - map_slot = 1; + plen = prom_getproplen(pcp->prom_node, "interrupt-map"); + if (plen <= 0) { + *interrupt = pci_slot_swivel(pbm, toplevel_pdev, + pdev, *interrupt); + cnode = pcp->prom_node; } else { - pregs = bus_pcp->prom_regs; - map_slot = 1; + *interrupt = pci_apply_intmap(pbm, toplevel_pdev, + pbus, pdev, + *interrupt, &cnode); + + while (pcp->prom_node != cnode && + pbus->bus->number != pbm->pci_first_busno) { + pbus = pbus->bus->self; + pcp = pbus->sysdata; + } } - } + pdev = pbus; - if (map_slot) { - *interrupt = ((*interrupt - - 1 - + PCI_SLOT(pdev->devfn)) & 0x3) + 1; + if (cnode == pbm->prom_node) + break; } - hi = pregs->phys_hi & intmask->phys_hi; - mid = pregs->phys_mid & intmask->phys_mid; - lo = pregs->phys_lo & intmask->phys_lo; - irq = *interrupt & intmask->interrupt; - - for (i = 0; i < num_intmap; i++) { - if (intmap[i].phys_hi == hi && - intmap[i].phys_mid == mid && - intmap[i].phys_lo == lo && - intmap[i].interrupt == irq) { - *interrupt = intmap[i].cinterrupt; - printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n", - pdev->bus->number, PCI_SLOT(pdev->devfn), - map_slot, *interrupt); - return 1; - } + return cnode; +} +
+static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt) +{ + struct pcidev_cookie *dev_pcp = pdev->sysdata; + struct pci_pbm_info *pbm = dev_pcp->pbm; + struct linux_prom_pci_registers reg; + unsigned int hi, mid, lo, irq; + int i, cnode, plen; + + cnode = pci_intmap_match_to_root(pbm, pdev, interrupt); + if (cnode == pbm->prom_node) + goto success; + + plen = prom_getproperty(cnode, "reg", (char *) &reg, sizeof(reg)); + if (plen <= 0 || + (plen % sizeof(struct linux_prom_pci_registers)) != 0) { + printk("%s: OBP node %x reg property has bad len %d\n", + pbm->name, cnode, plen); + goto fail; } - /* We will run this code even if pbm->num_pbm_intmap is zero, just so - * we can apply the slot mapping to the PROM interrupt property value. - * So do not spit out these warnings in that case. - */ - if (num_intmap != 0) { - /* Print it both to OBP console and kernel one so that if bootup - * hangs here the user has the information to report. 
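The masked-tuple lookup that pci_apply_intmap() performs above is the heart of OBP interrupt translation; in isolation it is just this (simplified stand-in types, not the kernel structures):

    struct imap_ent {
        unsigned int phys_hi, phys_mid, phys_lo;
        unsigned int intr;
        unsigned int cnode, cintr;
    };
    struct imask {
        unsigned int phys_hi, phys_mid, phys_lo, intr;
    };

    static int imap_lookup(const struct imap_ent *map, int n,
                           const struct imask *mask,
                           unsigned int hi, unsigned int mid,
                           unsigned int lo, unsigned int intr,
                           unsigned int *cnode, unsigned int *cintr)
    {
        int i;

        /* Mask the child's unit address and interrupt first... */
        hi &= mask->phys_hi;
        mid &= mask->phys_mid;
        lo &= mask->phys_lo;
        intr &= mask->intr;

        /* ...then linear-search the interrupt-map entries. */
        for (i = 0; i < n; i++) {
            if (map[i].phys_hi == hi && map[i].phys_mid == mid &&
                map[i].phys_lo == lo && map[i].intr == intr) {
                *cnode = map[i].cnode;  /* parent interrupt node */
                *cintr = map[i].cintr;  /* translated interrupt */
                return 1;
            }
        }
        return 0;   /* no entry: interrupt passes through unchanged */
    }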
- */ - prom_printf("pci_intmap_match: bus %02x, devfn %02x: ", - pdev->bus->number, pdev->devfn); - prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", - pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); - prom_printf("Please email this information to davem@redhat.com\n"); - - printk("pci_intmap_match: bus %02x, devfn %02x: ", - pdev->bus->number, pdev->devfn); - printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n", - pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt); - printk("Please email this information to davem@redhat.com\n"); + hi = reg.phys_hi & pbm->pbm_intmask.phys_hi; + mid = reg.phys_mid & pbm->pbm_intmask.phys_mid; + lo = reg.phys_lo & pbm->pbm_intmask.phys_lo; + irq = *interrupt & pbm->pbm_intmask.interrupt; + + for (i = 0; i < pbm->num_pbm_intmap; i++) { + struct linux_prom_pci_intmap *intmap; + + intmap = &pbm->pbm_intmap[i]; + + if (intmap->phys_hi == hi && + intmap->phys_mid == mid && + intmap->phys_lo == lo && + intmap->interrupt == irq) { + *interrupt = intmap->cinterrupt; + goto success; + } } +fail: return 0; + +success: + printk("PCI-IRQ: Routing bus[%2x] slot[%2x] to INO[%02x]\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), + *interrupt); + return 1; } static void __init pdev_fixup_irq(struct pci_dev *pdev) -- cgit v1.2.3 From 19a0d585e80e84b54bb9bf120bf0c826045dd3dd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 01:17:21 -0800 Subject: [SPARC64]: Disable smp_report_regs() for now. For 32 cpus and a slow console, it just wedges the machine especially with DETECT_SOFTLOCKUP enabled. Signed-off-by: David S. Miller --- arch/sparc64/kernel/process.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 803eea4dc4f..d00cb7ad89b 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -355,6 +355,7 @@ void show_regs(struct pt_regs *regs) extern long etrap, etraptl1; #endif __show_regs(regs); +#if 0 #ifdef CONFIG_SMP { extern void smp_report_regs(void); @@ -362,6 +363,7 @@ void show_regs(struct pt_regs *regs) smp_report_regs(); } #endif +#endif #ifdef VERBOSE_SHOWREGS if (regs->tpc >= &etrap && regs->tpc < &etraptl1 && -- cgit v1.2.3 From 72aff53f1fe74153eccef303ab2f79de888d248c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 01:29:17 -0800 Subject: [SPARC64]: Get SUN4V SMP working. The sibling cpu bringup is extremely fragile. We can only perform the most basic calls until we take over the trap table from the firmware/hypervisor on the new cpu. This means no accesses to %g4, %g5, %g6 since those can't be TLB translated without our trap handlers. In order to achieve this: 1) Change sun4v_init_mondo_queues() so that it can operate in several modes. It can allocate the queues, or install them in the current processor, or both. The boot cpu does both in its call early on. Later, the boot cpu allocates the sibling cpu queue, starts the sibling cpu, then the sibling cpu loads them in. 2) init_cur_cpu_trap() is changed to take the current_thread_info() as an argument instead of reading %g6 directly on the current cpu. 3) Create a trampoline stack for the sibling cpus. We do our basic kernel calls using this stack, which is locked into the kernel image, then go to our proper thread stack after taking over the trap table. 4) While we are in this delicate startup state, we put 0xdeadbeef into %g4/%g5/%g6 in order to catch accidental accesses. 
5) On the final prom_set_trap_table*() call, we put &init_thread_union into %g6. This is a hack to make prom_world(0) work. All that wants to do is restore the %asi register using get_thread_current_ds(). Longer term we should just do the OBP calls to set the trap table by hand just like we do for everything else. This would avoid that silly prom_world(0) issue, then we can remove the init_thread_union hack. Signed-off-by: David S. Miller --- arch/sparc64/kernel/irq.c | 30 ++++++++----- arch/sparc64/kernel/setup.c | 2 +- arch/sparc64/kernel/smp.c | 6 +++ arch/sparc64/kernel/trampoline.S | 92 ++++++++++++++++++++++++---------------- arch/sparc64/kernel/traps.c | 4 +- 5 files changed, 84 insertions(+), 50 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 6eb44ca5dba..bb0bb34555d 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -1018,21 +1018,29 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_ } /* Allocate and register the mondo and error queues for this cpu. */ -void __cpuinit sun4v_init_mondo_queues(int use_bootmem) +void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load) { - int cpu = hard_smp_processor_id(); struct trap_per_cpu *tb = &trap_block[cpu]; - alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); - alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); - alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); - alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); - alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); - alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); + if (alloc) { + alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); + alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); + alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); + alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); + alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); + alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); - init_cpu_send_mondo_info(tb, use_bootmem); + init_cpu_send_mondo_info(tb, use_bootmem); + } - sun4v_register_mondo_queues(cpu); + if (load) { + if (cpu != hard_smp_processor_id()) { + prom_printf("SUN4V: init mondo on cpu %d not %d\n", + cpu, hard_smp_processor_id()); + prom_halt(); + } + sun4v_register_mondo_queues(cpu); + } } /* Only invoked on boot processor. */ @@ -1043,7 +1051,7 @@ void __init init_IRQ(void) memset(&ivector_table[0], 0, sizeof(ivector_table)); if (tlb_type == hypervisor) - sun4v_init_mondo_queues(1); + sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1); /* We need to clear any IRQ's pending in the soft interrupt * registers, a spurious one could be left around from the diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 06807cf95ee..9b0c409d5b6 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -384,7 +384,7 @@ void __init setup_arch(char **cmdline_p) paging_init(); /* Get boot processor trap_block[] setup. 
*/ - init_cur_cpu_trap(); + init_cur_cpu_trap(current_thread_info()); } static int __init set_preferred_console(void) diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 527dfd7ae21..b586345fe3b 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -316,6 +316,8 @@ static void smp_synchronize_one_tick(int cpu) spin_unlock_irqrestore(&itc_sync_lock, flags); } +extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load); + extern unsigned long sparc64_cpu_startup; /* The OBP cpu startup callback truncates the 3rd arg cookie to @@ -339,6 +341,9 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) cpu_set(cpu, cpu_callout_map); if (tlb_type == hypervisor) { + /* Alloc the mondo queues, cpu will load them. */ + sun4v_init_mondo_queues(0, cpu, 1, 0); + prom_startcpu_cpuid(cpu, entry, cookie); } else { int cpu_node; @@ -352,6 +357,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) break; udelay(100); } + if (callin_flag) { ret = 0; } else { diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index b9c9f54b0a0..a4dc01a3d23 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -30,12 +30,16 @@ itlb_load: dtlb_load: .asciz "SUNW,dtlb-load" + /* XXX __cpuinit this thing XXX */ +#define TRAMP_STACK_SIZE 1024 + .align 16 +tramp_stack: + .skip TRAMP_STACK_SIZE + .text .align 8 .globl sparc64_cpu_startup, sparc64_cpu_startup_end sparc64_cpu_startup: - flushw - BRANCH_IF_SUN4V(g1, niagara_startup) BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup) BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup) @@ -58,6 +62,7 @@ cheetah_startup: or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5 stxa %g5, [%g0] ASI_DCU_CONTROL_REG membar #Sync + /* fallthru */ cheetah_generic_startup: mov TSB_EXTENSION_P, %g3 @@ -90,19 +95,17 @@ spitfire_startup: membar #Sync startup_continue: - wrpr %g0, 15, %pil - sethi %hi(0x80000000), %g2 sllx %g2, 32, %g2 wr %g2, 0, %tick_cmpr + mov %o0, %l0 + BRANCH_IF_SUN4V(g1, niagara_lock_tlb) /* Call OBP by hand to lock KERNBASE into i/d tlbs. * We lock 2 consequetive entries if we are 'bigkernel'. */ - mov %o0, %l0 - sethi %hi(prom_entry_lock), %g2 1: ldstub [%g2 + %lo(prom_entry_lock)], %g1 membar #StoreLoad | #StoreStore @@ -112,7 +115,6 @@ startup_continue: sethi %hi(p1275buf), %g2 or %g2, %lo(p1275buf), %g2 ldx [%g2 + 0x10], %l2 - mov %sp, %l1 add %l2, -(192 + 128), %sp flushw @@ -308,18 +310,9 @@ niagara_lock_tlb: ta HV_FAST_TRAP after_lock_tlb: - mov %l1, %sp - flushw - - mov %l0, %o0 - wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate wr %g0, 0, %fprs - /* XXX Buggy PROM... */ - srl %o0, 0, %o0 - ldx [%o0], %g6 - wr %g0, ASI_P, %asi mov PRIMARY_CONTEXT, %g7 @@ -341,22 +334,25 @@ after_lock_tlb: membar #Sync - mov 1, %g5 - sllx %g5, THREAD_SHIFT, %g5 - sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 - add %g6, %g5, %sp + /* Everything we do here, until we properly take over the + * trap table, must be done with extreme care. We cannot + * make any references to %g6 (current thread pointer), + * %g4 (current task pointer), or %g5 (base of current cpu's + * per-cpu area) until we properly take over the trap table + * from the firmware and hypervisor. + * + * Get onto temporary stack which is in the locked kernel image. 
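Pulling the new sun4v_init_mondo_queues() flags together, the three call sites of this patch (init_IRQ and smp_boot_one_cpu above, plus the trampoline hunk further below) reduce to roughly the following paraphrase; the stub body only documents the contract:

    static void sun4v_init_mondo_queues(int use_bootmem, int cpu,
                                        int alloc, int load)
    {
        /* alloc: carve out this cpu's mondo/error queues.
         * load:  hypervisor-register them; only legal on "cpu" itself.
         */
    }

    static void example(int boot_cpu, int new_cpu)
    {
        /* Boot cpu, early in init_IRQ(): allocate and register. */
        sun4v_init_mondo_queues(1, boot_cpu, 1, 1);

        /* Boot cpu starting a sibling: allocate only -- queues can
         * only be registered from the cpu that owns them.
         */
        sun4v_init_mondo_queues(0, new_cpu, 1, 0);

        /* Sibling, in the trampoline before taking over the trap
         * table: load the queues the boot cpu allocated for it.
         */
        sun4v_init_mondo_queues(0, new_cpu, 0, 1);
    }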
+ */ + sethi %hi(tramp_stack), %g1 + or %g1, %lo(tramp_stack), %g1 + add %g1, TRAMP_STACK_SIZE, %g1 + sub %g1, STACKFRAME_SZ + STACK_BIAS, %sp mov 0, %fp - wrpr %g0, 0, %wstate - wrpr %g0, 0, %tl - - /* Load TBA, then we can resurface. */ - sethi %hi(sparc64_ttable_tl0), %g5 - wrpr %g5, %tba - - ldx [%g6 + TI_TASK], %g4 - - wrpr %g0, 0, %wstate + /* Put garbage in these registers to trap any access to them. */ + set 0xdeadbeef, %g4 + set 0xdeadbeef, %g5 + set 0xdeadbeef, %g6 call init_irqwork_curcpu nop @@ -367,11 +363,17 @@ after_lock_tlb: bne,pt %icc, 1f nop + call hard_smp_processor_id + nop + + mov %o0, %o1 + mov 0, %o0 + mov 0, %o2 call sun4v_init_mondo_queues - mov 0, %o0 + mov 1, %o3 1: call init_cur_cpu_trap - nop + ldx [%l0], %o0 /* Start using proper page size encodings in ctx register. */ sethi %hi(sparc64_kern_pri_context), %g3 @@ -386,9 +388,14 @@ after_lock_tlb: membar #Sync - rdpr %pstate, %o1 - or %o1, PSTATE_IE, %o1 - wrpr %o1, 0, %pstate + wrpr %g0, 0, %wstate + + /* As a hack, put &init_thread_union into %g6. + * prom_world() loads from here to restore the %asi + * register. + */ + sethi %hi(init_thread_union), %g6 + or %g6, %lo(init_thread_union), %g6 sethi %hi(is_sun4v), %o0 lduw [%o0 + %lo(is_sun4v)], %o0 @@ -418,7 +425,20 @@ after_lock_tlb: 1: call prom_set_trap_table sethi %hi(sparc64_ttable_tl0), %o0 -2: call smp_callin +2: ldx [%l0], %g6 + ldx [%g6 + TI_TASK], %g4 + + mov 1, %g5 + sllx %g5, THREAD_SHIFT, %g5 + sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 + add %g6, %g5, %sp + mov 0, %fp + + rdpr %pstate, %o1 + or %o1, PSTATE_IE, %o1 + wrpr %o1, 0, %pstate + + call smp_callin nop call cpu_idle mov 0, %o0 diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 5956d0a9400..c9484ae5bb8 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2413,12 +2413,12 @@ struct trap_per_cpu trap_block[NR_CPUS]; /* This can get invoked before sched_init() so play it super safe * and use hard_smp_processor_id(). */ -void init_cur_cpu_trap(void) +void init_cur_cpu_trap(struct thread_info *t) { int cpu = hard_smp_processor_id(); struct trap_per_cpu *p = &trap_block[cpu]; - p->thread = current_thread_info(); + p->thread = t; p->pgd_paddr = 0; } -- cgit v1.2.3 From 101d5c18a928ef82b6c7bf99a9eaa536b5ccf593 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 08:20:18 -0800 Subject: [SPARC64]: Fix PCI IRQ probing regression. If the top-level cnode had multiple entries in its "reg" property, we'd fail. The buffer wasn't large enough in such cases. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/pci_common.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index f9101966a74..1448294a237 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -675,7 +675,7 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt { struct pcidev_cookie *dev_pcp = pdev->sysdata; struct pci_pbm_info *pbm = dev_pcp->pbm; - struct linux_prom_pci_registers reg; + struct linux_prom_pci_registers reg[PROMREG_MAX]; unsigned int hi, mid, lo, irq; int i, cnode, plen; @@ -683,7 +683,7 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt if (cnode == pbm->prom_node) goto success; - plen = prom_getproperty(cnode, "reg", (char *) &reg, sizeof(reg)); + plen = prom_getproperty(cnode, "reg", (char *) reg, sizeof(reg)); if (plen <= 0 || (plen % sizeof(struct linux_prom_pci_registers)) != 0) { printk("%s: OBP node %x reg property has bad len %d\n", @@ -691,9 +691,9 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt goto fail; } - hi = reg.phys_hi & pbm->pbm_intmask.phys_hi; - mid = reg.phys_mid & pbm->pbm_intmask.phys_mid; - lo = reg.phys_lo & pbm->pbm_intmask.phys_lo; + hi = reg[0].phys_hi & pbm->pbm_intmask.phys_hi; + mid = reg[0].phys_mid & pbm->pbm_intmask.phys_mid; + lo = reg[0].phys_lo & pbm->pbm_intmask.phys_lo; irq = *interrupt & pbm->pbm_intmask.interrupt; for (i = 0; i < pbm->num_pbm_intmap; i++) { -- cgit v1.2.3 From ebd8c56c5ae154e2c6cfb7453a76a4e7265b2377 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 08:38:06 -0800 Subject: [SPARC64]: Fix uniprocessor IRQ targeting on SUN4V. We need to use the real hardware processor ID when targeting interrupts, not the "define to 0" thing the uniprocessor build gives us. Also, fill in the Node-ID and Agent-ID fields properly on sun4u/Safari. Signed-off-by: David S. 
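The uniprocessor subtlety in a nutshell: on !CONFIG_SMP builds hard_smp_processor_id() collapses to the constant 0, which is the wrong thing to program into an interrupt target register when the lone cpu's physical ID is nonzero. Illustrative fragment; real_hard_smp_processor_id() is the assembly routine added in the diff that follows:

    #ifndef CONFIG_SMP
    #define hard_smp_processor_id() 0   /* generic UP definition */
    #endif

    /* Always implemented, SMP or not, and always reads the hardware: */
    extern unsigned long real_hard_smp_processor_id(void);

    static unsigned long irq_target_cpuid(void)
    {
        /* Never the SMP-conditional macro. */
        return real_hard_smp_processor_id();
    }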
Miller --- arch/sparc64/kernel/entry.S | 4 +- arch/sparc64/kernel/irq.c | 98 ++++++++++++++++++++++----------------------- arch/sparc64/kernel/setup.c | 6 +-- 3 files changed, 53 insertions(+), 55 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index f5c8a293979..bd332e41532 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1692,10 +1692,12 @@ __flushw_user: #ifdef CONFIG_SMP .globl hard_smp_processor_id hard_smp_processor_id: +#endif + .globl real_hard_smp_processor_id +real_hard_smp_processor_id: __GET_CPUID(%o0) retl nop -#endif /* %o0: devhandle * %o1: devino diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index bb0bb34555d..712b16cdd5f 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -138,11 +138,48 @@ out_unlock: return 0; } +extern unsigned long real_hard_smp_processor_id(void); + +static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) +{ + unsigned int tid; + + if (this_is_starfire) { + tid = starfire_translate(imap, cpuid); + tid <<= IMAP_TID_SHIFT; + tid &= IMAP_TID_UPA; + } else { + if (tlb_type == cheetah || tlb_type == cheetah_plus) { + unsigned long ver; + + __asm__ ("rdpr %%ver, %0" : "=r" (ver)); + if ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID) { + tid = cpuid << IMAP_TID_SHIFT; + tid &= IMAP_TID_JBUS; + } else { + unsigned int a = cpuid & 0x1f; + unsigned int n = (cpuid >> 5) & 0x1f; + + tid = ((a << IMAP_AID_SHIFT) | + (n << IMAP_NID_SHIFT)); + tid &= (IMAP_AID_SAFARI | + IMAP_NID_SAFARI);; + } + } else { + tid = cpuid << IMAP_TID_SHIFT; + tid &= IMAP_TID_UPA; + } + } + + return tid; +} + /* Now these are always passed a true fully specified sun4u INO. */ void enable_irq(unsigned int irq) { struct ino_bucket *bucket = __bucket(irq); - unsigned long imap; + unsigned long imap, cpuid; imap = bucket->imap; if (imap == 0UL) @@ -150,54 +187,25 @@ void enable_irq(unsigned int irq) preempt_disable(); + /* This gets the physical processor ID, even on uniprocessor, + * so we can always program the interrupt target correctly. + */ + cpuid = real_hard_smp_processor_id(); + if (tlb_type == hypervisor) { unsigned int ino = __irq_ino(irq); - int cpu = hard_smp_processor_id(); int err; - err = sun4v_intr_settarget(ino, cpu); + err = sun4v_intr_settarget(ino, cpuid); if (err != HV_EOK) - printk("sun4v_intr_settarget(%x,%d): err(%d)\n", - ino, cpu, err); + printk("sun4v_intr_settarget(%x,%lu): err(%d)\n", + ino, cpuid, err); err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); if (err != HV_EOK) printk("sun4v_intr_setenabled(%x): err(%d)\n", ino, err); } else { - unsigned long tid; - - if (tlb_type == cheetah || tlb_type == cheetah_plus) { - unsigned long ver; - - __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - if ((ver >> 32) == __JALAPENO_ID || - (ver >> 32) == __SERRANO_ID) { - /* We set it to our JBUS ID. */ - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (tid) - : "i" (ASI_JBUS_CONFIG)); - tid = ((tid & (0x1fUL<<17)) << 9); - tid &= IMAP_TID_JBUS; - } else { - /* We set it to our Safari AID. */ - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (tid) - : "i"(ASI_SAFARI_CONFIG)); - tid = ((tid & (0x3ffUL<<17)) << 9); - tid &= IMAP_AID_SAFARI; - } - } else if (this_is_starfire == 0) { - /* We set it to our UPA MID. 
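A compressed model of the sun4u_compute_tid() helper above; the shift values are placeholders (the pre-patch code shifted by 26) and the function is hypothetical, but the Safari case shows the agent/node split that the old retarget_one_irq() got wrong:

    /* Hypothetical stand-alone model, not the kernel function. */
    static unsigned int model_compute_tid(int is_safari, unsigned int cpuid,
                                          unsigned int tid_shift,
                                          unsigned int aid_shift,
                                          unsigned int nid_shift)
    {
        if (!is_safari)
            return cpuid << tid_shift;  /* UPA and JBUS: one field */

        /* Safari: the cpuid splits into a 5-bit agent ID and a
         * 5-bit node ID, each with its own field in the IMAP.
         */
        return ((cpuid & 0x1f) << aid_shift) |
               (((cpuid >> 5) & 0x1f) << nid_shift);
    }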
*/ - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=r" (tid) - : "i" (ASI_UPA_CONFIG)); - tid = ((tid & UPA_CONFIG_MID) << 9); - tid &= IMAP_TID_UPA; - } else { - tid = (starfire_translate(imap, - smp_processor_id()) << 26); - tid &= IMAP_TID_UPA; - } + unsigned int tid = sun4u_compute_tid(imap, cpuid); /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product * of this SYSIO's preconfigured IGN in the SYSIO Control @@ -817,18 +825,8 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) sun4v_intr_setenabled(ino, HV_INTR_ENABLED); } else { unsigned long imap = bucket->imap; - unsigned int tid; + unsigned int tid = sun4u_compute_tid(imap, goal_cpu); - if (tlb_type == cheetah || tlb_type == cheetah_plus) { - tid = goal_cpu << 26; - tid &= IMAP_AID_SAFARI; - } else if (this_is_starfire == 0) { - tid = goal_cpu << 26; - tid &= IMAP_TID_UPA; - } else { - tid = (starfire_translate(imap, goal_cpu) << 26); - tid &= IMAP_TID_UPA; - } upa_writel(tid | IMAP_VALID, imap); } diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 9b0c409d5b6..77066f1bbe2 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -222,7 +222,6 @@ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; static void __init per_cpu_patch(void) { -#ifdef CONFIG_SMP struct cpuid_patch_entry *p; unsigned long ver; int is_jbus; @@ -233,8 +232,8 @@ static void __init per_cpu_patch(void) is_jbus = 0; if (tlb_type != hypervisor) { __asm__ ("rdpr %%ver, %0" : "=r" (ver)); - is_jbus = ((ver >> 32) == __JALAPENO_ID || - (ver >> 32) == __SERRANO_ID); + is_jbus = ((ver >> 32UL) == __JALAPENO_ID || + (ver >> 32UL) == __SERRANO_ID); } p = &__cpuid_patch; @@ -279,7 +278,6 @@ static void __init per_cpu_patch(void) p++; } -#endif } static void __init sun4v_patch(void) -- cgit v1.2.3 From 3b3ab2eb9cf07ef1bc7a676c19aab994adb41a87 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 09:54:42 -0800 Subject: [SPARC64]: Use phys tsb address in tsb_insert() in SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6f860c39db8..0137d3dc6ae 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -252,7 +252,7 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long { unsigned long tsb_addr = (unsigned long) ent; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); -- cgit v1.2.3 From 46f860471483dce9ba5ce682a69c61cbceb54e52 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 10:28:24 -0800 Subject: [SPARC64]: Put SUN4V ITSB miss into correct trap table entry. It's 0x9 not 0xb. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/ttable.S | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index a9d210e11eb..d5a8dd52d1f 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -18,11 +18,10 @@ tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) tl0_iax: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) -tl0_resv009: BTRAP(0x9) +tl0_itsb_4v: SUN4V_ITSB_MISS tl0_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) -tl0_itsb_4v: SUN4V_ITSB_MISS -tl0_resv00c: BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) +tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) tl0_ill: membar #Sync TRAP_7INSNS(do_illegal_instruction) tl0_privop: TRAP(do_privop) @@ -182,11 +181,10 @@ sparc64_ttable_tl1: tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) -tl1_resv009: BTRAPTL1(0x9) +tl1_itsb_4v: SUN4V_ITSB_MISS tl1_iae: membar #Sync TRAP_NOSAVE_7INSNS(__spitfire_access_error) -tl1_itsb_4v: SUN4V_ITSB_MISS -tl1_resv00c: BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) +tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) tl1_ill: TRAPTL1(do_ill_tl1) tl1_privop: BTRAPTL1(0x11) tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15) -- cgit v1.2.3 From c857e3fdbc306e95fdcaad1d8f3ea6bc8e7eea99 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 10:35:23 -0800 Subject: [SPARC64]: __bzero_noasi --> __clear_user Signed-off-by: David S. Miller --- arch/sparc64/kernel/sparc64_ksyms.c | 2 +- arch/sparc64/lib/bzero.S | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index f1f01378d07..801fc0ce484 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -335,7 +335,7 @@ EXPORT_SYMBOL(copy_to_user_fixup); EXPORT_SYMBOL(copy_from_user_fixup); EXPORT_SYMBOL(copy_in_user_fixup); EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__bzero_noasi); +EXPORT_SYMBOL(__clear_user); /* Various address conversion macros use this. 
*/ EXPORT_SYMBOL(phys_base); diff --git a/arch/sparc64/lib/bzero.S b/arch/sparc64/lib/bzero.S index 1d2abcfa4e5..c7bbae8c590 100644 --- a/arch/sparc64/lib/bzero.S +++ b/arch/sparc64/lib/bzero.S @@ -98,12 +98,12 @@ __bzero_done: .text; \ .align 4; - .globl __bzero_noasi - .type __bzero_noasi, #function -__bzero_noasi: /* %o0=buf, %o1=len */ - brz,pn %o1, __bzero_noasi_done + .globl __clear_user + .type __clear_user, #function +__clear_user: /* %o0=buf, %o1=len */ + brz,pn %o1, __clear_user_done cmp %o1, 16 - bl,pn %icc, __bzero_noasi_tiny + bl,pn %icc, __clear_user_tiny EX_ST(prefetcha [%o0 + 0x00] %asi, #n_writes) andcc %o0, 0x3, %g0 be,pt %icc, 2f @@ -145,14 +145,14 @@ __bzero_noasi: /* %o0=buf, %o1=len */ subcc %g1, 8, %g1 bne,pt %icc, 5b add %o0, 0x8, %o0 -6: brz,pt %o1, __bzero_noasi_done +6: brz,pt %o1, __clear_user_done nop -__bzero_noasi_tiny: +__clear_user_tiny: 1: EX_ST(stba %g0, [%o0 + 0x00] %asi) subcc %o1, 1, %o1 bne,pt %icc, 1b add %o0, 1, %o0 -__bzero_noasi_done: +__clear_user_done: retl clr %o0 - .size __bzero_noasi, .-__bzero_noasi + .size __clear_user, .-__clear_user -- cgit v1.2.3 From 3f19a84e39619053f117bd5bb9183c5bfea7db45 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 12:03:20 -0800 Subject: [SPARC64]: Set associativity of kernel TSB descriptor correctly. It should be 1, not 0. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 0137d3dc6ae..950d58082e2 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1056,7 +1056,7 @@ static void __init sun4v_ktsb_init(void) break; }; - ktsb_descr[0].assoc = 0; + ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; -- cgit v1.2.3 From 3763be32d591cacf808c36390a8af3f2784cde5f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 12:33:13 -0800 Subject: [SPARC64]: Define ARCH_HAS_READ_CURRENT_TIMER. This gives more consistent bogomips and delay() semantics, especially on sun4v. It gives weird looking values though... Signed-off-by: David S. 
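The delay.c rewrite in the diff below drops the calibrated spin loop in favour of polling the free-running cycle counter. The pattern, sketched portably with read_tick() standing in for tick_ops->get_tick():

    extern unsigned long read_tick(void); /* stand-in for tick_ops->get_tick() */

    static void delay_ticks(unsigned long ticks)
    {
        unsigned long start = read_tick();

        /* Unsigned subtraction keeps this correct across counter wrap. */
        while (read_tick() - start < ticks)
            ;
    }

With loops now measured against the tick register, delays derive from clock_tick rather than a separately calibrated udelay_val, which is why the cpufreq notifier in the time.c hunk stops maintaining udelay_val.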
Miller --- arch/sparc64/kernel/time.c | 8 +------- arch/sparc64/lib/delay.c | 19 ++++++++----------- 2 files changed, 9 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 7041146f86f..f6275adbc81 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -1029,11 +1029,10 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg } struct freq_table { - unsigned long udelay_val_ref; unsigned long clock_tick_ref; unsigned int ref_freq; }; -static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 }; +static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0 }; unsigned long sparc64_get_clock_tick(unsigned int cpu) { @@ -1055,16 +1054,11 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val if (!ft->ref_freq) { ft->ref_freq = freq->old; - ft->udelay_val_ref = cpu_data(cpu).udelay_val; ft->clock_tick_ref = cpu_data(cpu).clock_tick; } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || (val == CPUFREQ_RESUMECHANGE)) { - cpu_data(cpu).udelay_val = - cpufreq_scale(ft->udelay_val_ref, - ft->ref_freq, - freq->new); cpu_data(cpu).clock_tick = cpufreq_scale(ft->clock_tick_ref, ft->ref_freq, diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c index e8808727617..fb27e54a03e 100644 --- a/arch/sparc64/lib/delay.c +++ b/arch/sparc64/lib/delay.c @@ -1,6 +1,6 @@ /* delay.c: Delay loops for sparc64 * - * Copyright (C) 2004 David S. Miller + * Copyright (C) 2004, 2006 David S. Miller * * Based heavily upon x86 variant which is: * Copyright (C) 1993 Linus Torvalds @@ -8,19 +8,16 @@ */ #include +#include void __delay(unsigned long loops) { - __asm__ __volatile__( -" b,pt %%xcc, 1f\n" -" cmp %0, 0\n" -" .align 32\n" -"1:\n" -" bne,pt %%xcc, 1b\n" -" subcc %0, 1, %0\n" - : "=&r" (loops) - : "0" (loops) - : "cc"); + unsigned long bclock, now; + + bclock = tick_ops->get_tick(); + do { + now = tick_ops->get_tick(); + } while ((now-bclock) < loops); } /* We used to multiply by HZ after shifting down by 32 bits -- cgit v1.2.3 From 52845cdb3b9c4c5fe1c2e295bd14457ff8dd6bcc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:32:33 -0800 Subject: [SPARC64]: Init boot cpu's trap_block[] before paging_init() It must be ready when we take over the trap table. Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 77066f1bbe2..0c68a76143a 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -379,10 +379,10 @@ void __init setup_arch(char **cmdline_p) smp_setup_cpu_possible_map(); - paging_init(); - /* Get boot processor trap_block[] setup. */ init_cur_cpu_trap(current_thread_info()); + + paging_init(); } static int __init set_preferred_console(void) -- cgit v1.2.3 From 12e126ad229abc718d05600027fcd5794c1e31e5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 14:40:30 -0800 Subject: [SPARC64]: Check for errors in hypervisor_tlb_lock(). Signed-off-by: David S. 
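The one-line summary above undersells a recurring pattern in this series: every hypervisor fast trap hands back a status word (the patch's arg0 != 0 test, i.e. != HV_EOK), and silently ignoring it turns a boot-time failure into corruption later. A hypothetical C-level shape of the added check; the real call is inline assembly:

    /* hv_mmu_map_perm_addr() is a made-up wrapper name for the fast
     * trap issued by hypervisor_tlb_lock(); it returns the status the
     * trap leaves in the first argument register.
     */
    extern unsigned long hv_mmu_map_perm_addr(unsigned long vaddr,
                                              unsigned long pte,
                                              unsigned long mmu);

    static void lock_tlb_entry_or_die(unsigned long vaddr,
                                      unsigned long pte,
                                      unsigned long mmu)
    {
        unsigned long status = hv_mmu_map_perm_addr(vaddr, pte, mmu);

        if (status != 0) {      /* HV_EOK */
            /* prom_printf() the arguments and prom_halt(): a failed
             * permanent kernel mapping is unrecoverable at boot.
             */
        }
    }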
Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 950d58082e2..bd9e3205674 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -510,6 +510,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "=&r" (arg3) : "0" (func), "1" (arg0), "2" (arg1), "3" (arg2), "4" (arg3)); + if (arg0 != 0) { + prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " + "errors with %lx\n", vaddr, 0, pte, mmu, arg0); + prom_halt(); + } } static unsigned long kern_large_tte(unsigned long paddr); -- cgit v1.2.3 From 6c8927c9634e8a1bc95d5291c55205707f9fa40a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 14:58:02 -0800 Subject: [SPARC64]: Fix some SUN4V TLB handling bugs. 1) Add error return checking for TLB load hypervisor calls. 2) Don't fallthru to dtlb tsb miss handler from itlb tsb miss handler, oops. 3) On window fixups, propagate fault information to fixup handler correctly. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 57 ++++++++++++++++++++++++++++++++++-- arch/sparc64/kernel/traps.c | 34 +++++++++++++++++++++ arch/sparc64/kernel/tsb.S | 6 ++-- 3 files changed, 92 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index df65d712dcc..244d50de849 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -84,8 +84,9 @@ sun4v_itlb_load: mov %g3, %o2 ! PTE mov HV_MMU_IMMU, %o3 ! flags ta HV_MMU_MAP_ADDR_TRAP + brnz,pn %o0, sun4v_itlb_error + mov %g2, %o1 ! restore %o1 mov %g1, %o0 ! restore %o0 - mov %g2, %o1 ! restore %o1 mov %g5, %o2 ! restore %o2 mov %g7, %o3 ! restore %o3 @@ -126,8 +127,9 @@ sun4v_dtlb_load: mov %g3, %o2 ! PTE mov HV_MMU_DMMU, %o3 ! flags ta HV_MMU_MAP_ADDR_TRAP + brnz,pn %o0, sun4v_dtlb_error + mov %g2, %o1 ! restore %o1 mov %g1, %o0 ! restore %o0 - mov %g2, %o1 ! restore %o1 mov %g5, %o2 ! restore %o2 mov %g7, %o3 ! restore %o3 @@ -154,6 +156,7 @@ sun4v_itsb_miss: ldxa [%g1] ASI_SCRATCHPAD, %g1 brz,pn %g5, kvmap_itlb_4v mov FAULT_CODE_ITLB, %g3 + ba,a,pt %xcc, sun4v_tsb_miss_common /* Called from trap table with TAG TARGET placed into * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. @@ -182,6 +185,56 @@ sun4v_tsb_miss_common: ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 +sun4v_itlb_error: + sethi %hi(sun4v_err_itlb_vaddr), %g1 + stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] + sethi %hi(sun4v_err_itlb_ctx), %g1 + srlx %g6, 48, %o1 ! ctx + stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)] + sethi %hi(sun4v_err_itlb_pte), %g1 + stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)] + sethi %hi(sun4v_err_itlb_error), %g1 + stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] + + rdpr %tl, %g4 + cmp %g4, 1 + ble,pt %icc, 1f + sethi %hi(2f), %g7 + ba,pt %xcc, etraptl1 + or %g7, %lo(2f), %g7 + +1: ba,pt %xcc, etrap +2: or %g7, %lo(2b), %g7 + call sun4v_itlb_error_report + add %sp, PTREGS_OFF, %o0 + + /* NOTREACHED */ + +sun4v_dtlb_error: + sethi %hi(sun4v_err_dtlb_vaddr), %g1 + stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] + sethi %hi(sun4v_err_dtlb_ctx), %g1 + srlx %g6, 48, %o1 ! 
ctx + stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)] + sethi %hi(sun4v_err_dtlb_pte), %g1 + stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)] + sethi %hi(sun4v_err_dtlb_error), %g1 + stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] + + rdpr %tl, %g4 + cmp %g4, 1 + ble,pt %icc, 1f + sethi %hi(2f), %g7 + ba,pt %xcc, etraptl1 + or %g7, %lo(2f), %g7 + +1: ba,pt %xcc, etrap +2: or %g7, %lo(2b), %g7 + call sun4v_dtlb_error_report + add %sp, PTREGS_OFF, %o0 + + /* NOTREACHED */ + /* Instruction Access Exception, tl0. */ sun4v_iacc: ldxa [%g0] ASI_SCRATCHPAD, %g2 diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index c9484ae5bb8..5a157e92bfc 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -1928,6 +1928,40 @@ void sun4v_nonresum_overflow(struct pt_regs *regs) atomic_inc(&sun4v_nonresum_oflow_cnt); } +unsigned long sun4v_err_itlb_vaddr; +unsigned long sun4v_err_itlb_ctx; +unsigned long sun4v_err_itlb_pte; +unsigned long sun4v_err_itlb_error; + +void sun4v_itlb_error_report(struct pt_regs *regs, int tl) +{ + if (tl > 1) + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + printk("SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); + printk("SUN4V-ITLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n", + sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, + sun4v_err_itlb_pte, sun4v_err_itlb_error); + prom_halt(); +} + +unsigned long sun4v_err_dtlb_vaddr; +unsigned long sun4v_err_dtlb_ctx; +unsigned long sun4v_err_dtlb_pte; +unsigned long sun4v_err_dtlb_error; + +void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) +{ + if (tl > 1) + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + printk("SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); + printk("SUN4V-DTLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n", + sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, + sun4v_err_dtlb_pte, sun4v_err_dtlb_error); + prom_halt(); +} + void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 7996c9d6670..a17259cf34b 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -135,8 +135,8 @@ tsb_do_fault: wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate .section .sun4v_2insn_patch, "ax" .word 661b - nop - nop + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g2 .previous bne,pn %xcc, tsb_do_itlb_fault @@ -150,7 +150,7 @@ tsb_do_dtlb_fault: ldxa [%g4] ASI_DMMU, %g5 .section .sun4v_2insn_patch, "ax" .word 661b - mov %g4, %g5 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 nop .previous -- cgit v1.2.3 From 7adb37fe80d06cbd40de9b225b12a3a9ec40b6bb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 15:07:43 -0800 Subject: [SPARC64]: Don't do anything in flush_ptrace_access() on SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ptrace.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 3f9746f856d..eb93e9c5284 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c @@ -124,6 +124,9 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, { BUG_ON(len > PAGE_SIZE); + if (tlb_type == hypervisor) + return; + #ifdef DCACHE_ALIASING_POSSIBLE /* If bit 13 of the kernel address we used to access the * user page is the same as the virtual address that page -- cgit v1.2.3 From 8b234274418d6d79527c4ac3a72da446ca4cb35f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 18:01:02 -0800 Subject: [SPARC64]: More TLB/TSB handling fixes. 
The SUN4V convention with non-shared TSBs is that the context bit of the TAG is clear. So we have to choose an "invalid" bit and initialize new TSBs appropriately. Otherwise a zero TAG looks "valid". Make sure, for the window fixup cases, that we use the right global registers and that we don't potentially trample on the live global registers in etrap/rtrap handling (%g2 and %g6) and that we put the missing virtual address properly in %g5. Signed-off-by: David S. Miller --- arch/sparc64/kernel/dtlb_miss.S | 4 ++-- arch/sparc64/kernel/itlb_miss.S | 12 +++++------ arch/sparc64/kernel/ktlb.S | 14 ++++++++----- arch/sparc64/kernel/sun4v_tlb_miss.S | 39 +++++++++++++++++++----------------- arch/sparc64/kernel/tsb.S | 17 ++++++++++------ arch/sparc64/mm/init.c | 4 +++- arch/sparc64/mm/tsb.c | 25 +++++++++++------------ 7 files changed, 64 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S index 2ef6f6e6e72..09a6a15a710 100644 --- a/arch/sparc64/kernel/dtlb_miss.S +++ b/arch/sparc64/kernel/dtlb_miss.S @@ -2,10 +2,10 @@ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET srlx %g6, 48, %g5 ! Get context + sllx %g6, 22, %g6 ! Zero out context brz,pn %g5, kvmap_dtlb ! Context 0 processing - nop ! Delay slot (fill me) + srlx %g6, 22, %g6 ! Delay slot TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry - nop ! Push branch to next I$ line cmp %g4, %g6 ! Compare TAG /* DTLB ** ICACHE line 2: TSB compare and TLB load */ diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S index 730caa4a150..6dfe3968c37 100644 --- a/arch/sparc64/kernel/itlb_miss.S +++ b/arch/sparc64/kernel/itlb_miss.S @@ -2,25 +2,25 @@ ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET srlx %g6, 48, %g5 ! Get context + sllx %g6, 22, %g6 ! Zero out context brz,pn %g5, kvmap_itlb ! Context 0 processing - nop ! Delay slot (fill me) + srlx %g6, 22, %g6 ! Delay slot TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry cmp %g4, %g6 ! Compare TAG - sethi %hi(PAGE_EXEC), %g4 ! Setup exec check /* ITLB ** ICACHE line 2: TSB compare and TLB load */ + sethi %hi(PAGE_EXEC), %g4 ! Setup exec check ldx [%g4 + %lo(PAGE_EXEC)], %g4 bne,pn %xcc, tsb_miss_itlb ! Miss mov FAULT_CODE_ITLB, %g3 andcc %g5, %g4, %g0 ! Executable? be,pn %xcc, tsb_do_fault nop ! Delay slot, fill me - stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB - retry ! Trap done + nop /* ITLB ** ICACHE line 3: */ - nop - nop + stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB + retry ! Trap done nop nop nop diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 47dfd45971e..ac29da915d0 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -52,8 +52,10 @@ kvmap_itlb_vmalloc_addr: /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 + mov 1, %g7 + sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_itlb_longpath - KTSB_STORE(%g1, %g0) + KTSB_STORE(%g1, %g7) KTSB_WRITE(%g1, %g5, %g6) @@ -146,8 +148,10 @@ kvmap_dtlb_vmalloc_addr: /* Load and check PTE. 
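Restating the tag convention change in C: sun4v tags are now just (vaddr >> 22) with no context bits, so an all-zero entry would look like a valid translation of vaddr 0; entries must instead be marked dead via an explicit invalid bit. Sketch below; the bit number 46 is inferred from the 0x40 memset fill (0x4040404040404040 has bit 46 set in every 64-bit word), not stated by the patch:

    #define TSB_TAG_INVALID_BIT 46  /* inferred, see note above */

    struct tsb_ent { unsigned long tag, pte; };

    static int tsb_hit(const struct tsb_ent *ent, unsigned long vaddr)
    {
        if (ent->tag & (1UL << TSB_TAG_INVALID_BIT))
            return 0;                   /* invalidated slot */
        return ent->tag == (vaddr >> 22);   /* context-free compare */
    }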
*/ ldxa [%g5] ASI_PHYS_USE_EC, %g5 + mov 1, %g7 + sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_dtlb_longpath - KTSB_STORE(%g1, %g0) + KTSB_STORE(%g1, %g7) KTSB_WRITE(%g1, %g5, %g6) @@ -215,8 +219,8 @@ kvmap_dtlb_longpath: wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate .section .sun4v_2insn_patch, "ax" .word 661b - nop - nop + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g5 .previous rdpr %tl, %g3 @@ -226,7 +230,7 @@ kvmap_dtlb_longpath: ldxa [%g4] ASI_DMMU, %g5 .section .sun4v_2insn_patch, "ax" .word 661b - mov %g4, %g5 + ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 nop .previous diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 244d50de849..57ccdaec7cc 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -16,15 +16,14 @@ ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \ ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX; - /* DEST = (CTX << 48) | (VADDR >> 22) + /* DEST = (VADDR >> 22) * * Branch to ZERO_CTX_LABEL is context is zero. */ -#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, TMP, ZERO_CTX_LABEL) \ - srlx VADDR, 22, TMP; \ - sllx CTX, 48, DEST; \ +#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \ + srlx VADDR, 22, DEST; \ brz,pn CTX, ZERO_CTX_LABEL; \ - or DEST, TMP, DEST; + nop; /* Create TSB pointer. This is something like: * @@ -53,7 +52,7 @@ sun4v_itlb_miss: ldxa [%g1] ASI_SCRATCHPAD, %g1 LOAD_ITLB_INFO(%g2, %g4, %g5) - COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_itlb_4v) + COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v) COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ @@ -72,15 +71,15 @@ sun4v_itlb_miss: * * %g3: PTE * %g4: vaddr - * %g6: TAG TARGET (only "CTX << 48" part matters) */ sun4v_itlb_load: + ldxa [%g0] ASI_SCRATCHPAD, %g6 mov %o0, %g1 ! save %o0 mov %o1, %g2 ! save %o1 mov %o2, %g5 ! save %o2 mov %o3, %g7 ! save %o3 mov %g4, %o0 ! vaddr - srlx %g6, 48, %o1 ! ctx + ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx mov %g3, %o2 ! PTE mov HV_MMU_IMMU, %o3 ! flags ta HV_MMU_MAP_ADDR_TRAP @@ -101,7 +100,7 @@ sun4v_dtlb_miss: ldxa [%g1] ASI_SCRATCHPAD, %g1 LOAD_DTLB_INFO(%g2, %g4, %g5) - COMPUTE_TAG_TARGET(%g6, %g4, %g5, %g3, kvmap_dtlb_4v) + COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v) COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7) /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ @@ -115,15 +114,15 @@ sun4v_dtlb_miss: * * %g3: PTE * %g4: vaddr - * %g6: TAG TARGET (only "CTX << 48" part matters) */ sun4v_dtlb_load: + ldxa [%g0] ASI_SCRATCHPAD, %g6 mov %o0, %g1 ! save %o0 mov %o1, %g2 ! save %o1 mov %o2, %g5 ! save %o2 mov %o3, %g7 ! save %o3 mov %g4, %o0 ! vaddr - srlx %g6, 48, %o1 ! ctx + ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx mov %g3, %o2 ! PTE mov HV_MMU_DMMU, %o3 ! flags ta HV_MMU_MAP_ADDR_TRAP @@ -136,16 +135,18 @@ sun4v_dtlb_load: retry sun4v_dtlb_prot: + SET_GL(1) + /* Load MMU Miss base into %g2. 
*/ - ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldxa [%g0] ASI_SCRATCHPAD, %g5 - ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 + ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 rdpr %tl, %g1 cmp %g1, 1 - bgu,pn %xcc, winfix_trampoline + bgu,pn %xcc, winfix_trampoline nop - ba,pt %xcc, sparc64_realfault_common - mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 + ba,pt %xcc, sparc64_realfault_common + mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 /* Called from trap table with TAG TARGET placed into * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and @@ -189,7 +190,8 @@ sun4v_itlb_error: sethi %hi(sun4v_err_itlb_vaddr), %g1 stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] sethi %hi(sun4v_err_itlb_ctx), %g1 - srlx %g6, 48, %o1 ! ctx + ldxa [%g0] ASI_SCRATCHPAD, %g6 + ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)] sethi %hi(sun4v_err_itlb_pte), %g1 stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)] @@ -214,7 +216,8 @@ sun4v_dtlb_error: sethi %hi(sun4v_err_dtlb_vaddr), %g1 stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] sethi %hi(sun4v_err_dtlb_ctx), %g1 - srlx %g6, 48, %o1 ! ctx + ldxa [%g0] ASI_SCRATCHPAD, %g6 + ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)] sethi %hi(sun4v_err_dtlb_pte), %g1 stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)] diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index a17259cf34b..cc225c0563c 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -36,7 +36,7 @@ tsb_miss_itlb: /* At this point we have: * %g4 -- missing virtual address * %g1 -- TSB entry address - * %g6 -- TAG TARGET ((vaddr >> 22) | (ctx << 48)) + * %g6 -- TAG TARGET (vaddr >> 22) */ tsb_miss_page_table_walk: TRAP_LOAD_PGD_PHYS(%g7, %g5) @@ -50,8 +50,10 @@ tsb_reload: /* Load and check PTE. */ ldxa [%g5] ASI_PHYS_USE_EC, %g5 + mov 1, %g7 + sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, tsb_do_fault - TSB_STORE(%g1, %g0) + TSB_STORE(%g1, %g7) /* If it is larger than the base page size, don't * bother putting it into the TSB. 
@@ -62,8 +64,10 @@ tsb_reload: sethi %hi(_PAGE_SZBITS), %g7 ldx [%g7 + %lo(_PAGE_SZBITS)], %g7 cmp %g2, %g7 + mov 1, %g7 + sllx %g7, TSB_TAG_INVALID_BIT, %g7 bne,a,pn %xcc, tsb_tlb_reload - TSB_STORE(%g1, %g0) + TSB_STORE(%g1, %g7) TSB_WRITE(%g1, %g5, %g6) @@ -136,7 +140,7 @@ tsb_do_fault: .section .sun4v_2insn_patch, "ax" .word 661b SET_GL(1) - ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldxa [%g0] ASI_SCRATCHPAD, %g4 .previous bne,pn %xcc, tsb_do_itlb_fault @@ -150,7 +154,7 @@ tsb_do_dtlb_fault: ldxa [%g4] ASI_DMMU, %g5 .section .sun4v_2insn_patch, "ax" .word 661b - ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 + ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5 nop .previous @@ -217,8 +221,9 @@ tsb_flush: bne,pn %icc, 1b membar #LoadLoad cmp %g1, %o1 + mov 1, %o3 bne,pt %xcc, 2f - clr %o3 + sllx %o3, TSB_TAG_INVALID_BIT, %o3 TSB_CAS_TAG(%o0, %g1, %o3) cmp %g1, %o3 bne,pn %xcc, 1b diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index bd9e3205674..aa2aec6373c 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -296,7 +296,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; + tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); } } @@ -1110,6 +1110,8 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + if (tlb_type == hypervisor) sun4v_pgprot_init(); else diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 3c1ff05038b..353cb060561 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -20,9 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries return vaddr & (nentries - 1); } -static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context) +static inline int tag_compare(unsigned long tag, unsigned long vaddr) { - return (tag == ((vaddr >> 22) | (context << 48))); + return (tag == (vaddr >> 22)); } /* TSB flushes need only occur on the processor initiating the address @@ -38,8 +38,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); struct tsb *ent = &swapper_tsb[hash]; - if (tag_compare(ent->tag, v, 0)) { - ent->tag = 0UL; + if (tag_compare(ent->tag, v)) { + ent->tag = (1UL << TSB_TAG_INVALID_BIT); membar_storeload_storestore(); } } @@ -50,14 +50,9 @@ void flush_tsb_user(struct mmu_gather *mp) struct mm_struct *mm = mp->mm; struct tsb *tsb = mm->context.tsb; unsigned long nentries = mm->context.tsb_nentries; - unsigned long ctx, base; + unsigned long base; int i; - if (unlikely(!CTX_VALID(mm->context))) - return; - - ctx = CTX_HWBITS(mm->context); - if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else @@ -71,7 +66,7 @@ void flush_tsb_user(struct mmu_gather *mp) hash = tsb_hash(v, nentries); ent = base + (hash * sizeof(struct tsb)); - tag = (v >> 22UL) | (ctx << 48UL); + tag = (v >> 22UL); tsb_flush(ent, tag); } @@ -243,7 +238,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, "i" (ASI_NUCLEUS_QUAD_LDD)); } - if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT))) + if (tag & ((1UL << TSB_TAG_LOCK_BIT) | + (1UL << TSB_TAG_INVALID_BIT))) continue; /* We only put base page size PTEs into the TSB, @@ -315,10 +311,13 @@ void tsb_grow(struct mm_struct *mm, unsigned 
long rss, gfp_t gfp_flags) break; } - page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size)); + page = alloc_pages(gfp_flags, get_order(size)); if (unlikely(!page)) return; + /* Mark all tags as invalid. */ + memset(page_address(page), 0x40, size); + if (size == max_tsb_size) mm->context.tsb_rss_limit = ~0UL; else -- cgit v1.2.3 From 0f15952ac8641bde1045162ffd4a7b474cc318b0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 12:43:16 -0800 Subject: [SPARC64]: Export a PAGE_SHARED symbol. For drivers/media/*, noticed by Fabbione. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index aa2aec6373c..c7aa4404edc 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1353,6 +1353,10 @@ EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; + +pgprot_t PAGE_SHARED __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED); + pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; @@ -1367,6 +1371,7 @@ static void prot_init_common(unsigned long page_none, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); + PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); -- cgit v1.2.3 From 6cc200db9500f53c6b884ea5d5bc7eabae7f5d5c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 16:36:39 -0800 Subject: [SPARC64]: Set %gl to 1 in kvmap_itlb_longpath on SUN4V. Just like kvmap_dtlb_longpath we have to force the global register level to one in order to mimic the PSTATE_MG --> PSTATE_AG transition done on SUN4U. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index ac29da915d0..883180be3d5 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -92,7 +92,7 @@ kvmap_itlb_longpath: wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate .section .sun4v_2insn_patch, "ax" .word 661b - nop + SET_GL(1) nop .previous -- cgit v1.2.3 From 24c523ecc667dfeb28ef969cfabc531709bfffb8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 16:39:39 -0800 Subject: [SPARC64]: Fix unaligned access winfixup handling on SUN4V. Another case where we have to force ourselves into global register level one. Also make sure the arguments passed to sun4v_do_mna() are correct. This area actually needs some more work, for example spill fixup is not necessarily going to do the right thing for this case. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 42 +++++++++++++++++++++++++----------- arch/sparc64/kernel/winfixup.S | 9 ++++---- 2 files changed, 34 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 57ccdaec7cc..654244a3b04 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -137,7 +137,7 @@ sun4v_dtlb_load: sun4v_dtlb_prot: SET_GL(1) - /* Load MMU Miss base into %g2. */ + /* Load MMU Miss base into %g5. 
*/ ldxa [%g0] ASI_SCRATCHPAD, %g5 ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 @@ -148,9 +148,10 @@ sun4v_dtlb_prot: ba,pt %xcc, sparc64_realfault_common mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 - /* Called from trap table with TAG TARGET placed into - * %g6, SCRATCHPAD_UTSBREG1 contents in %g1, and - * SCRATCHPAD_MMU_MISS contents in %g2. + /* Called from trap table: + * %g4: vaddr + * %g5: context + * %g6: TAG TARGET */ sun4v_itsb_miss: mov SCRATCHPAD_UTSBREG1, %g1 @@ -159,8 +160,10 @@ sun4v_itsb_miss: mov FAULT_CODE_ITLB, %g3 ba,a,pt %xcc, sun4v_tsb_miss_common - /* Called from trap table with TAG TARGET placed into - * %g6 and SCRATCHPAD_UTSBREG1 contents in %g1. + /* Called from trap table: + * %g4: vaddr + * %g5: context + * %g6: TAG TARGET */ sun4v_dtsb_miss: mov SCRATCHPAD_UTSBREG1, %g1 @@ -168,6 +171,8 @@ sun4v_dtsb_miss: brz,pn %g5, kvmap_dtlb_4v mov FAULT_CODE_DTLB, %g3 + /* fallthrough */ + /* Create TSB pointer into %g1. This is something like: * * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL; @@ -304,19 +309,30 @@ sun4v_dacc_tl1: /* Memory Address Unaligned. */ sun4v_mna: - ldxa [%g0] ASI_SCRATCHPAD, %g2 + /* Window fixup? */ + rdpr %tl, %g2 + cmp %g2, 1 + ble,pt %icc, 1f + nop + + SET_GL(1) + ldxa [%g0] ASI_SCRATCHPAD, %g5 + ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 + mov HV_FAULT_TYPE_UNALIGNED, %g3 + ldx [%g5 + HV_FAULT_D_CTX_OFFSET], %g4 + sllx %g3, 16, %g3 + or %g4, %g3, %g4 + ba,pt %xcc, winfix_mna + rdpr %tpc, %g3 + /* not reached */ + +1: ldxa [%g0] ASI_SCRATCHPAD, %g2 mov HV_FAULT_TYPE_UNALIGNED, %g3 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5 sllx %g3, 16, %g3 or %g5, %g3, %g5 - /* Window fixup? */ - rdpr %tl, %g2 - cmp %g2, 1 - bgu,pn %icc, winfix_mna - rdpr %tpc, %g3 - ba,pt %xcc, etrap rd %pc, %g7 mov %l4, %o1 diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 161371370e9..c4aa110a10e 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -115,16 +115,17 @@ fill_fixup_mna: ba,pt %xcc, etrap rd %pc, %g7 sethi %hi(tlb_type), %g1 - mov %l4, %o1 lduw [%g1 + %lo(tlb_type)], %g1 - mov %l5, %o2 cmp %g1, 3 bne,pt %icc, 1f add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 call sun4v_do_mna - nop + mov %l5, %o1 ba,a,pt %xcc, rtrap_clr_l6 -1: call mem_address_unaligned +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned nop ba,a,pt %xcc, rtrap_clr_l6 -- cgit v1.2.3 From 04d74758eb7dce6dfb7d2101315827c267ffefc4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 17:06:28 -0800 Subject: [SPARC64]: Use KERN_EMERG in dump_tl1_traplog() and sun4v TLB errors. We're about to seriously die in these cases so it is important that the messages make it to the console. Signed-off-by: David S. Miller --- arch/sparc64/kernel/traps.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 5a157e92bfc..6d45867ee16 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -75,12 +75,12 @@ static void dump_tl1_traplog(struct tl1_traplog *p) { int i, limit; - printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n", - p->tl); + printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, " + "dumping track stack.\n", p->tl); limit = (tlb_type == hypervisor) ? 
2 : 4; for (i = 0; i < 4; i++) { - printk(KERN_CRIT + printk(KERN_EMERG "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " "TNPC[%016lx] TT[%lx]\n", i + 1, @@ -1938,10 +1938,13 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - printk("SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk("SUN4V-ITLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n", + printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); + printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, sun4v_err_itlb_pte, sun4v_err_itlb_error); + prom_halt(); } @@ -1955,10 +1958,13 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) if (tl > 1) dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); - printk("SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk("SUN4V-DTLB: vaddr[%lx] ctx[%lx] pte[%lx] error[%lx]\n", + printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", + regs->tpc, tl); + printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " + "pte[%lx] error[%lx]\n", sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, sun4v_err_dtlb_pte, sun4v_err_dtlb_error); + prom_halt(); } -- cgit v1.2.3 From 6a32fd4d0d42258004631dc0ac90665382a2e5dc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 19 Feb 2006 22:21:32 -0800 Subject: [SPARC64]: Remove PGLIST_NENTS PCI IOMMU mapping limitation on SUN4V. Use a batching queue system for IOMMU mapping setup, with a page sized batch. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 233 ++++++++++++++++++++++++------------ arch/sparc64/kernel/pci_sun4v.h | 10 +- arch/sparc64/kernel/pci_sun4v_asm.S | 11 +- 3 files changed, 171 insertions(+), 83 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 902d07c714f..4e9d3c451af 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -26,11 +26,86 @@ #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) -struct sun4v_pglist { - u64 *pglist; +struct pci_iommu_batch { + struct pci_dev *pdev; /* Device mapping is for. */ + unsigned long prot; /* IOMMU page protections */ + unsigned long entry; /* Index into IOTSB. */ + u64 *pglist; /* List of physical pages */ + unsigned long npages; /* Number of pages in list. */ }; -static DEFINE_PER_CPU(struct sun4v_pglist, iommu_pglists); +static DEFINE_PER_CPU(struct pci_iommu_batch, pci_iommu_batch); + +/* Interrupts must be disabled. */ +static inline void pci_iommu_batch_start(struct pci_dev *pdev, unsigned long prot, unsigned long entry) +{ + struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + + p->pdev = pdev; + p->prot = prot; + p->entry = entry; + p->npages = 0; +} + +/* Interrupts must be disabled. 
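(A quick size check on the "page sized batch": with the 8KB base pages sparc64 normally uses, PGLIST_NENTS is PAGE_SIZE / sizeof(u64) = 8192 / 8 = 1024, so each per-cpu list can queue up to 1024 page mappings between flushes.)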
*/ +static long pci_iommu_batch_flush(struct pci_iommu_batch *p) +{ + struct pcidev_cookie *pcp = p->pdev->sysdata; + unsigned long devhandle = pcp->pbm->devhandle; + unsigned long prot = p->prot; + unsigned long entry = p->entry; + u64 *pglist = p->pglist; + unsigned long npages = p->npages; + + do { + long num; + + num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist)); + if (unlikely(num < 0)) { + if (printk_ratelimit()) + printk("pci_iommu_batch_flush: IOMMU map of " + "[%08lx:%08lx:%lx:%lx:%lx] failed with " + "status %ld\n", + devhandle, HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist), num); + return -1; + } + + entry += num; + npages -= num; + pglist += num; + } while (npages != 0); + + p->entry = entry; + p->npages = 0; + + return 0; +} + +/* Interrupts must be disabled. */ +static inline long pci_iommu_batch_add(u64 phys_page) +{ + struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + + BUG_ON(p->npages >= PGLIST_NENTS); + + p->pglist[p->npages++] = phys_page; + if (p->npages == PGLIST_NENTS) + return pci_iommu_batch_flush(p); + + return 0; +} + +/* Interrupts must be disabled. */ +static inline long pci_iommu_batch_end(void) +{ + struct pci_iommu_batch *p = &__get_cpu_var(pci_iommu_batch); + + BUG_ON(p->npages >= PGLIST_NENTS); + + return pci_iommu_batch_flush(p); +} static long pci_arena_alloc(struct pci_iommu_arena *arena, unsigned long npages) { @@ -86,65 +161,64 @@ static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr unsigned long flags, order, first_page, npages, n; void *ret; long entry; - u64 *pglist; - u32 devhandle; - int cpu; size = IO_PAGE_ALIGN(size); order = get_order(size); - if (order >= MAX_ORDER) + if (unlikely(order >= MAX_ORDER)) return NULL; npages = size >> IO_PAGE_SHIFT; - if (npages > PGLIST_NENTS) - return NULL; first_page = __get_free_pages(GFP_ATOMIC, order); - if (first_page == 0UL) + if (unlikely(first_page == 0UL)) return NULL; memset((char *)first_page, 0, PAGE_SIZE << order); pcp = pdev->sysdata; - devhandle = pcp->pbm->devhandle; iommu = pcp->pbm->iommu; spin_lock_irqsave(&iommu->lock, flags); entry = pci_arena_alloc(&iommu->arena, npages); spin_unlock_irqrestore(&iommu->lock, flags); - if (unlikely(entry < 0L)) { - free_pages(first_page, order); - return NULL; - } + if (unlikely(entry < 0L)) + goto arena_alloc_fail; *dma_addrp = (iommu->page_table_map_base + (entry << IO_PAGE_SHIFT)); ret = (void *) first_page; first_page = __pa(first_page); - cpu = get_cpu(); + local_irq_save(flags); - pglist = __get_cpu_var(iommu_pglists).pglist; - for (n = 0; n < npages; n++) - pglist[n] = first_page + (n * PAGE_SIZE); + pci_iommu_batch_start(pdev, + (HV_PCI_MAP_ATTR_READ | + HV_PCI_MAP_ATTR_WRITE), + entry); - do { - unsigned long num; + for (n = 0; n < npages; n++) { + long err = pci_iommu_batch_add(first_page + (n * PAGE_SIZE)); + if (unlikely(err < 0L)) + goto iommu_map_fail; + } - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), - npages, - (HV_PCI_MAP_ATTR_READ | - HV_PCI_MAP_ATTR_WRITE), - __pa(pglist)); - entry += num; - npages -= num; - pglist += num; - } while (npages != 0); + if (unlikely(pci_iommu_batch_end() < 0L)) + goto iommu_map_fail; - put_cpu(); + local_irq_restore(flags); return ret; + +iommu_map_fail: + /* Interrupts are disabled. 
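Every converted call site in this patch then follows the same shape; schematically (a sketch distilled from the converted functions, with the allocation and error unwinding around it elided; pdev, prot, entry, npages and base_paddr stand in for the caller's locals):

    unsigned long flags;
    long err, n;

    local_irq_save(flags);              /* the pglist is per-cpu */
    pci_iommu_batch_start(pdev, prot, entry);
    for (n = 0; n < npages; n++) {
        err = pci_iommu_batch_add(base_paddr + (n * IO_PAGE_SIZE));
        if (unlikely(err < 0L))
            goto iommu_map_fail;        /* free the arena entry */
    }
    if (unlikely(pci_iommu_batch_end() < 0L))
        goto iommu_map_fail;
    local_irq_restore(flags);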
*/ + spin_lock(&iommu->lock); + pci_arena_free(&iommu->arena, entry, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + +arena_alloc_fail: + free_pages(first_page, order); + return NULL; } static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma) @@ -186,15 +260,12 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, struct pci_iommu *iommu; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; - u32 devhandle, bus_addr, ret; + u32 bus_addr, ret; unsigned long prot; long entry; - u64 *pglist; - int cpu; pcp = pdev->sysdata; iommu = pcp->pbm->iommu; - devhandle = pcp->pbm->devhandle; if (unlikely(direction == PCI_DMA_NONE)) goto bad; @@ -202,8 +273,6 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, oaddr = (unsigned long)ptr; npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; - if (unlikely(npages > PGLIST_NENTS)) - goto bad; spin_lock_irqsave(&iommu->lock, flags); entry = pci_arena_alloc(&iommu->arena, npages); @@ -220,24 +289,19 @@ static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, if (direction != PCI_DMA_TODEVICE) prot |= HV_PCI_MAP_ATTR_WRITE; - cpu = get_cpu(); - - pglist = __get_cpu_var(iommu_pglists).pglist; - for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) - pglist[i] = base_paddr; + local_irq_save(flags); - do { - unsigned long num; + pci_iommu_batch_start(pdev, prot, entry); - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), - npages, prot, - __pa(pglist)); - entry += num; - npages -= num; - pglist += num; - } while (npages != 0); + for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { + long err = pci_iommu_batch_add(base_paddr); + if (unlikely(err < 0L)) + goto iommu_map_fail; + } + if (unlikely(pci_iommu_batch_end() < 0L)) + goto iommu_map_fail; - put_cpu(); + local_irq_restore(flags); return ret; @@ -245,6 +309,14 @@ bad: if (printk_ratelimit()) WARN_ON(1); return PCI_DMA_ERROR_CODE; + +iommu_map_fail: + /* Interrupts are disabled. 
*/ + spin_lock(&iommu->lock); + pci_arena_free(&iommu->arena, entry, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + + return PCI_DMA_ERROR_CODE; } static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction) @@ -289,18 +361,19 @@ static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_ #define SG_ENT_PHYS_ADDRESS(SG) \ (__pa(page_address((SG)->page)) + (SG)->offset) -static inline void fill_sg(long entry, u32 devhandle, +static inline long fill_sg(long entry, struct pci_dev *pdev, struct scatterlist *sg, int nused, int nelems, unsigned long prot) { struct scatterlist *dma_sg = sg; struct scatterlist *sg_end = sg + nelems; - int i, cpu, pglist_ent; - u64 *pglist; + unsigned long flags; + int i; + + local_irq_save(flags); + + pci_iommu_batch_start(pdev, prot, entry); - cpu = get_cpu(); - pglist = __get_cpu_var(iommu_pglists).pglist; - pglist_ent = 0; for (i = 0; i < nused; i++) { unsigned long pteval = ~0UL; u32 dma_npages; @@ -338,7 +411,12 @@ static inline void fill_sg(long entry, u32 devhandle, pteval = (pteval & IOPTE_PAGE); while (len > 0) { - pglist[pglist_ent++] = pteval; + long err; + + err = pci_iommu_batch_add(pteval); + if (unlikely(err < 0L)) + goto iommu_map_failed; + pteval += IO_PAGE_SIZE; len -= (IO_PAGE_SIZE - offset); offset = 0; @@ -366,18 +444,15 @@ static inline void fill_sg(long entry, u32 devhandle, dma_sg++; } - BUG_ON(pglist_ent == 0); + if (unlikely(pci_iommu_batch_end() < 0L)) + goto iommu_map_failed; - do { - unsigned long num; - - num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry), - pglist_ent); - entry += num; - pglist_ent -= num; - } while (pglist_ent != 0); + local_irq_restore(flags); + return 0; - put_cpu(); +iommu_map_failed: + local_irq_restore(flags); + return -1L; } static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) @@ -385,9 +460,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n struct pcidev_cookie *pcp; struct pci_iommu *iommu; unsigned long flags, npages, prot; - u32 devhandle, dma_base; + u32 dma_base; struct scatterlist *sgtmp; - long entry; + long entry, err; int used; /* Fast path single entry scatterlists. 
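That single-entry fast path is context the diff does not show. From the surrounding code it is essentially the following (a reconstruction for orientation only, not part of this patch; field names follow the sparc64 scatterlist of this kernel vintage):

    if (nelems == 1) {
        sglist->dma_address =
            pci_4v_map_single(pdev,
                              page_address(sglist->page) + sglist->offset,
                              sglist->length, direction);
        if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
            return 0;
        sglist->dma_length = sglist->length;
        return 1;
    }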
*/ @@ -404,7 +479,6 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n pcp = pdev->sysdata; iommu = pcp->pbm->iommu; - devhandle = pcp->pbm->devhandle; if (unlikely(direction == PCI_DMA_NONE)) goto bad; @@ -441,7 +515,9 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n if (direction != PCI_DMA_TODEVICE) prot |= HV_PCI_MAP_ATTR_WRITE; - fill_sg(entry, devhandle, sglist, used, nelems, prot); + err = fill_sg(entry, pdev, sglist, used, nelems, prot); + if (unlikely(err < 0L)) + goto iommu_map_failed; return used; @@ -449,6 +525,13 @@ bad: if (printk_ratelimit()) WARN_ON(1); return 0; + +iommu_map_failed: + spin_lock_irqsave(&iommu->lock, flags); + pci_arena_free(&iommu->arena, entry, npages); + spin_unlock_irqrestore(&iommu->lock, flags); + + return 0; } static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction) @@ -1011,13 +1094,13 @@ void sun4v_pci_init(int node, char *model_name) } } - for (i = 0; i < NR_CPUS; i++) { + for_each_cpu(i) { unsigned long page = get_zeroed_page(GFP_ATOMIC); if (!page) goto fatal_memory_error; - per_cpu(iommu_pglists, i).pglist = (u64 *) page; + per_cpu(pci_iommu_batch, i).pglist = (u64 *) page; } p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); diff --git a/arch/sparc64/kernel/pci_sun4v.h b/arch/sparc64/kernel/pci_sun4v.h index 88f199e11a7..884d25f6158 100644 --- a/arch/sparc64/kernel/pci_sun4v.h +++ b/arch/sparc64/kernel/pci_sun4v.h @@ -6,11 +6,11 @@ #ifndef _PCI_SUN4V_H #define _PCI_SUN4V_H -extern unsigned long pci_sun4v_iommu_map(unsigned long devhandle, - unsigned long tsbid, - unsigned long num_ttes, - unsigned long io_attributes, - unsigned long io_page_list_pa); +extern long pci_sun4v_iommu_map(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes, + unsigned long io_attributes, + unsigned long io_page_list_pa); extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, unsigned long tsbid, unsigned long num_ttes); diff --git a/arch/sparc64/kernel/pci_sun4v_asm.S b/arch/sparc64/kernel/pci_sun4v_asm.S index 424db652664..6604fdbf746 100644 --- a/arch/sparc64/kernel/pci_sun4v_asm.S +++ b/arch/sparc64/kernel/pci_sun4v_asm.S @@ -11,14 +11,19 @@ * %o3: io_attributes * %o4: io_page_list phys address * - * returns %o0: num ttes mapped + * returns %o0: -status if status was non-zero, else + * %o0: num pages mapped */ .globl pci_sun4v_iommu_map pci_sun4v_iommu_map: + mov %o5, %g1 mov HV_FAST_PCI_IOMMU_MAP, %o5 ta HV_FAST_TRAP - retl - mov %o1, %o0 + brnz,pn %o0, 1f + sub %g0, %o0, %o0 + mov %o1, %o0 +1: retl + nop /* %o0: devhandle * %o1: tsbid -- cgit v1.2.3 From 37133c006cd3ff4f4dccb3df88efc446863b515e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 00:36:57 -0800 Subject: [SPARC64]: Disable smp_report_regs() for now. It's extremely noisy and causes much grief on slow consoles with large numbers of cpus. We'll have to provide this some saner way in order to re-enable this. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/traps.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 6d45867ee16..4eb2ee1125d 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2226,10 +2226,11 @@ void die_if_kernel(char *str, struct pt_regs *regs) } user_instruction_dump ((unsigned int __user *) regs->tpc); } +#if 0 #ifdef CONFIG_SMP smp_report_regs(); #endif - +#endif if (regs->tstate & TSTATE_PRIV) do_exit(SIGKILL); do_exit(SIGSEGV); -- cgit v1.2.3 From 39334a4b2c52a06960c480b1a4a05f5d5375c8bf Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 00:54:09 -0800 Subject: [SPARC64]: Fix typo in dump_tl1_traplog() Actually make use of the 'limit' we compute. Signed-off-by: David S. Miller --- arch/sparc64/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 4eb2ee1125d..78268217b06 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) "dumping track stack.\n", p->tl); limit = (tlb_type == hypervisor) ? 2 : 4; - for (i = 0; i < 4; i++) { + for (i = 0; i < limit; i++) { printk(KERN_EMERG "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] " "TNPC[%016lx] TT[%lx]\n", -- cgit v1.2.3 From abf3b7bd89c11492b145e338913c0b511084cff7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 01:09:01 -0800 Subject: [SPARC64]: Kill stray PGLIST_NENTS check in pci_sun4v.c I forgot to remove the one in pci_4v_map_sg() during the iommu batching commit. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index 4e9d3c451af..d9ef0c7580e 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -485,8 +485,6 @@ static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int n /* Step 1: Prepare scatter list. */ npages = prepare_sg(sglist, nelems); - if (unlikely(npages > PGLIST_NENTS)) - goto bad; /* Step 2: Allocate a cluster and context, if necessary. */ spin_lock_irqsave(&iommu->lock, flags); -- cgit v1.2.3 From d82965c16781a7c5b8508c3784ba10bbeef34ff4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 01:42:51 -0800 Subject: [SPARC64]: Handle zero-length map requests in pci_sun4v.c By simply changing the do-while loop into a plain while loop. Signed-off-by: David S. Miller --- arch/sparc64/kernel/pci_sun4v.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index d9ef0c7580e..a9c44c0ae0a 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -57,7 +57,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p) u64 *pglist = p->pglist; unsigned long npages = p->npages; - do { + while (npages != 0) { long num; num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), @@ -75,7 +75,7 @@ static long pci_iommu_batch_flush(struct pci_iommu_batch *p) entry += num; npages -= num; pglist += num; - } while (npages != 0); + } p->entry = entry; p->npages = 0; -- cgit v1.2.3 From 55555633bd4e72534b53c50525817259435e18df Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Mon, 20 Feb 2006 01:50:09 -0800 Subject: [SPARC64]: Typo in sun4v_data_access_exception log message. Should be "Dax" not "Iax". Signed-off-by: David S. Miller --- arch/sparc64/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 78268217b06..8df0cf29e3e 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -283,7 +283,7 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig printk("sun4v_data_access_exception: ADDR[%016lx] " "CTX[%04x] TYPE[%04x], going.\n", addr, ctx, type); - die_if_kernel("Iax", regs); + die_if_kernel("Dax", regs); } if (test_thread_flag(TIF_32BIT)) { -- cgit v1.2.3 From 4e74ae800bafe79d4aaa529bc5d52425757c0115 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 16:02:24 -0800 Subject: [SPARC64]: Handle unimplemented FPU square-root on Niagara. The math-emu code only expects unfinished fpop traps when emulating FPU sqrt instructions on pre-Niagara chips. On Niagara we can get unimplemented fpop, so handle that. Signed-off-by: David S. Miller --- arch/sparc64/math-emu/math.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c index 2ae05cd7b77..a93a3664c85 100644 --- a/arch/sparc64/math-emu/math.c +++ b/arch/sparc64/math-emu/math.c @@ -206,9 +206,30 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) case FSTOQ: TYPE(3,3,1,1,1,0,0); break; case FDTOQ: TYPE(3,3,1,2,1,0,0); break; case FQTOI: TYPE(3,1,0,3,1,0,0); break; + + /* We can get either unimplemented or unfinished + * for these cases. Pre-Niagara systems generate + * unfinished fpop for SUBNORMAL cases, and Niagara + * always gives unimplemented fpop for fsqrt{s,d}. + */ + case FSQRTS: { + unsigned long x = current_thread_info()->xfsr[0]; + + x = (x >> 14) & 0xf; + TYPE(x,1,1,1,1,0,0); + printk("math-emu: type is %08x\n", type); + break; + } + + case FSQRTD: { + unsigned long x = current_thread_info()->xfsr[0]; + + x = (x >> 14) & 0xf; + TYPE(x,2,1,2,1,0,0); + break; + } + /* SUBNORMAL - ftt == 2 */ - case FSQRTS: TYPE(2,1,1,1,1,0,0); break; - case FSQRTD: TYPE(2,2,1,2,1,0,0); break; case FADDD: case FSUBD: case FMULD: -- cgit v1.2.3 From c79f76777d678ba454aa727800e1386a1fd1f2e8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Feb 2006 22:56:01 -0800 Subject: [SPARC64]: Args to SUNW,set-trap-table are 64-bit. They were getting truncated to 32-bit and this is very bad when your MMU fault status area is in physical memory above 4GB on SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/prom/misc.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 36d2b9c1622..90df42141b1 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c @@ -133,12 +133,17 @@ int prom_getprev(void) /* Install Linux trap table so PROM uses that instead of its own. 
*/ void prom_set_trap_table(unsigned long tba) { - p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); + p1275_cmd("SUNW,set-trap-table", + (P1275_ARG(0, P1275_ARG_IN_64B) | + P1275_INOUT(1, 0)), tba); } void prom_set_trap_table_sun4v(unsigned long tba, unsigned long mmfsa) { - p1275_cmd("SUNW,set-trap-table", P1275_INOUT(2, 0), tba, mmfsa); + p1275_cmd("SUNW,set-trap-table", + (P1275_ARG(0, P1275_ARG_IN_64B) | + P1275_ARG(1, P1275_ARG_IN_64B) | + P1275_INOUT(2, 0)), tba, mmfsa); } int prom_get_mmu_ihandle(void) -- cgit v1.2.3 From d371c0c17466b7e7bb4d395f96aa885a23df1073 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 13:59:47 -0800 Subject: [SPARC64]: Pass multiple CPUs at once to hypervisor cross-call API. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 54 ----------------------------------------------- 1 file changed, 54 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index b586345fe3b..356d423ae14 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -557,7 +557,6 @@ retry: } } -#if 0 /* Multi-cpu list version. */ static int init_cpu_list(u16 *list, cpumask_t mask) { @@ -631,59 +630,6 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t put_cpu(); } -#else -/* Single-cpu list version. */ -static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) -{ - int this_cpu = get_cpu(); - struct trap_per_cpu *tb = &trap_block[this_cpu]; - u64 *mondo = __va(tb->cpu_mondo_block_pa); - u16 *cpu_list = __va(tb->cpu_list_pa); - int i; - - mondo[0] = data0; - mondo[1] = data1; - mondo[2] = data2; - wmb(); - - for_each_cpu_mask(i, mask) { - int retries = 0; - - do { - register unsigned long func __asm__("%o5"); - register unsigned long arg0 __asm__("%o0"); - register unsigned long arg1 __asm__("%o1"); - register unsigned long arg2 __asm__("%o2"); - - cpu_list[0] = i; - func = HV_FAST_CPU_MONDO_SEND; - arg0 = 1; - arg1 = tb->cpu_list_pa; - arg2 = tb->cpu_mondo_block_pa; - - __asm__ __volatile__("ta %8" - : "=&r" (func), "=&r" (arg0), - "=&r" (arg1), "=&r" (arg2) - : "0" (func), "1" (arg0), - "2" (arg1), "3" (arg2), - "i" (HV_FAST_TRAP) - : "memory"); - if (likely(arg0 == HV_EOK)) - break; - - if (unlikely(++retries > 100)) { - printk("CPU[%d]: sun4v mondo error %lu\n", - this_cpu, func); - break; - } - - udelay(2 * i); - } while (1); - } - - put_cpu(); -} -#endif /* Send cross call to all processors mentioned in MASK * except self. -- cgit v1.2.3 From 8ca2557c48000daa8183b07d83f582a597705ebe Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 14:29:42 -0800 Subject: [SPARC64]: Niagara optimized memset/bzero/clear_user. Signed-off-by: David S. Miller --- arch/sparc64/kernel/head.S | 2 + arch/sparc64/lib/Makefile | 2 +- arch/sparc64/lib/NGbzero.S | 162 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 165 insertions(+), 1 deletion(-) create mode 100644 arch/sparc64/lib/NGbzero.S (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 3222c8205b5..8c6c4694be9 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -420,6 +420,8 @@ niagara_tlb_fixup: /* Patch copy/clear ops. 
*/ call niagara_patch_copyops nop + call niagara_patch_bzero + nop call niagara_patch_pageops nop diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 3d0e9a24d7a..8812ded19f0 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile @@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ U1memcpy.o U1copy_from_user.o U1copy_to_user.o \ U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \ NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ - NGpage.o \ + NGpage.o NGbzero.o \ copy_in_user.o user_fixup.o memmove.o \ mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S new file mode 100644 index 00000000000..fef584f745d --- /dev/null +++ b/arch/sparc64/lib/NGbzero.S @@ -0,0 +1,162 @@ +/* NGbzero.S: Niagara optimized memset/clear_user. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ +#include + +#define EX_ST(x,y) \ +98: x,y; \ + .section .fixup; \ + .align 4; \ +99: retl; \ + mov %o1, %o0; \ + .section __ex_table; \ + .align 4; \ + .word 98b, 99b; \ + .text; \ + .align 4; + + .text + + .globl NGmemset + .type NGmemset, #function +NGmemset: /* %o0=buf, %o1=pat, %o2=len */ + and %o1, 0xff, %o3 + mov %o2, %o1 + sllx %o3, 8, %g1 + or %g1, %o3, %o2 + sllx %o2, 16, %g1 + or %g1, %o2, %o2 + sllx %o2, 32, %g1 + ba,pt %xcc, 1f + or %g1, %o2, %o2 + + .globl NGbzero + .type NGbzero, #function +NGbzero: + clr %o2 +1: brz,pn %o1, NGbzero_return + mov %o0, %o3 + + /* %o5: saved %asi, restored at NGbzero_done + * %g7: store-init %asi to use + * %o4: non-store-init %asi to use + */ + rd %asi, %o5 + mov ASI_BLK_INIT_QUAD_LDD_P, %g7 + mov ASI_P, %o4 + wr %o4, 0x0, %asi + +NGbzero_from_clear_user: + cmp %o1, 15 + bl,pn %icc, NGbzero_tiny + andcc %o0, 0x7, %g1 + be,pt %xcc, 2f + mov 8, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %o0, 1, %o0 +2: cmp %o1, 128 + bl,pn %icc, NGbzero_medium + andcc %o0, (64 - 1), %g1 + be,pt %xcc, NGbzero_pre_loop + mov 64, %g2 + sub %g2, %g1, %g1 + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 + +NGbzero_pre_loop: + wr %g7, 0x0, %asi + andn %o1, (64 - 1), %g1 + sub %o1, %g1, %o1 +NGbzero_loop: + EX_ST(stxa %o2, [%o0 + 0x00] %asi) + EX_ST(stxa %o2, [%o0 + 0x08] %asi) + EX_ST(stxa %o2, [%o0 + 0x10] %asi) + EX_ST(stxa %o2, [%o0 + 0x18] %asi) + EX_ST(stxa %o2, [%o0 + 0x20] %asi) + EX_ST(stxa %o2, [%o0 + 0x28] %asi) + EX_ST(stxa %o2, [%o0 + 0x30] %asi) + EX_ST(stxa %o2, [%o0 + 0x38] %asi) + subcc %g1, 64, %g1 + bne,pt %xcc, NGbzero_loop + add %o0, 64, %o0 + + wr %o4, 0x0, %asi + brz,pn %o1, NGbzero_done +NGbzero_medium: + andncc %o1, 0x7, %g1 + be,pn %xcc, 2f + sub %o1, %g1, %o1 +1: EX_ST(stxa %o2, [%o0 + 0x00] %asi) + subcc %g1, 8, %g1 + bne,pt %xcc, 1b + add %o0, 8, %o0 +2: brz,pt %o1, NGbzero_done + nop + +NGbzero_tiny: +1: EX_ST(stba %o2, [%o0 + 0x00] %asi) + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 1, %o0 + + /* fallthrough */ + +NGbzero_done: + wr %o5, 0x0, %asi + +NGbzero_return: + retl + mov %o3, %o0 + .size NGbzero, .-NGbzero + .size NGmemset, .-NGmemset + + .globl NGclear_user + .type NGclear_user, #function +NGclear_user: /* %o0=buf, %o1=len */ + rd %asi, %o5 + brz,pn %o1, NGbzero_done + clr %o3 + cmp %o5, ASI_AIUS + bne,pn %icc, NGbzero + clr %o2 + mov ASI_BLK_INIT_QUAD_LDD_AIUS, %g7 + ba,pt %xcc, NGbzero_from_clear_user + mov ASI_AIUS, %o4 + .size NGclear_user, 
.-NGclear_user + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + + .globl niagara_patch_bzero + .type niagara_patch_bzero,#function +niagara_patch_bzero: + NG_DO_PATCH(memset, NGmemset) + NG_DO_PATCH(__bzero, NGbzero) + NG_DO_PATCH(__clear_user, NGclear_user) + retl + nop + .size niagara_patch_bzero,.-niagara_patch_bzero -- cgit v1.2.3 From 1bd0cd74d102a527b2a72907698d73fad4b82cbd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 15:41:01 -0800 Subject: [SPARC64]: Kill cpudata->idle_volume. Set, but never used. We used to use this for dynamic IRQ retargetting, but that code died a long time ago. Signed-off-by: David S. Miller --- arch/sparc64/kernel/process.c | 10 ++-------- arch/sparc64/kernel/smp.c | 2 -- 2 files changed, 2 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index d00cb7ad89b..1ab8283efc4 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -56,6 +56,8 @@ void default_idle(void) { } + + #ifndef CONFIG_SMP /* @@ -104,19 +106,11 @@ void cpu_idle(void) while(1) { if (need_resched()) { - cpuinfo->idle_volume = 0; preempt_enable_no_resched(); schedule(); preempt_disable(); check_pgt_cache(); } - cpuinfo->idle_volume++; - - /* The store ordering is so that IRQ handlers on - * other cpus see our increasing idleness for the buddy - * redistribution algorithm. -DaveM - */ - membar_storeload_storestore(); } } diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 356d423ae14..0cd9b16612e 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -88,8 +88,6 @@ void __init smp_store_cpu_info(int id) cpu_data(id).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); - cpu_data(id).idle_volume = 1; - def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", def); -- cgit v1.2.3 From 6f5374c91f0dd1d92408ed44c066c32bcce5ce69 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 15:42:09 -0800 Subject: [SPARC64]: Add sun4v_cpu_yield(). Signed-off-by: David S. Miller --- arch/sparc64/kernel/entry.S | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index bd332e41532..9f3048e64e8 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1786,3 +1786,12 @@ sun4v_cpu_qconf: ta HV_FAST_TRAP retl nop + + /* returns %o0: status + */ + .globl sun4v_cpu_yield +sun4v_cpu_yield: + mov HV_FAST_CPU_YIELD, %o5 + ta HV_FAST_TRAP + retl + nop -- cgit v1.2.3 From 689126a48a4cf7efc77d0d090b2db4e8927d7819 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 16:53:26 -0800 Subject: [SPARC64] math-emu: Delete debugging printk left by previous commit. Signed-off-by: David S. 
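Stepping back to the NG_DO_PATCH macro in the bzero patch above: all it does is overwrite the first two instructions of a generic routine with a "ba,pt" to the Niagara version plus a nop. A rough C rendering of the same computation (illustrative only; types are simplified, the hypothetical ng_do_patch() name is ours, and the real thing runs in assembly during early boot):

    #define BRANCH_ALWAYS 0x10680000    /* "ba,pt %xcc, <disp19>" template */
    #define NOP           0x01000000

    static void ng_do_patch(unsigned int *old_fn, unsigned int *new_fn)
    {
        long disp = (char *)new_fn - (char *)old_fn;

        /* The sll 11 / srl 13 pair keeps a signed 19-bit word displacement. */
        old_fn[0] = BRANCH_ALWAYS | ((disp >> 2) & 0x7ffff);
        old_fn[1] = NOP;
        /* The macro then issues "flush" so the I-cache sees the new code. */
    }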
Miller --- arch/sparc64/math-emu/math.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/math-emu/math.c b/arch/sparc64/math-emu/math.c index a93a3664c85..6ee496c2864 100644 --- a/arch/sparc64/math-emu/math.c +++ b/arch/sparc64/math-emu/math.c @@ -217,7 +217,6 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) x = (x >> 14) & 0xf; TYPE(x,1,1,1,1,0,0); - printk("math-emu: type is %08x\n", type); break; } -- cgit v1.2.3 From 30c91d576e9ea41c963e7f28643219bda73b0ddc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 16:55:23 -0800 Subject: [SPARC64]: Use sun4v_cpu_idle() in cpu_idle() on SUN4V. We have to turn off the "polling nrflag" bit when we sleep the cpu like this, so that we'll get a cross-cpu interrupt to wake the processor up from the yield. We also have to disable PSTATE_IE in %pstate around the yield call and recheck need_resched() in order to avoid any races. Signed-off-by: David S. Miller --- arch/sparc64/kernel/process.c | 81 +++++++++++++++++-------------------------- 1 file changed, 32 insertions(+), 49 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 1ab8283efc4..1c7ca2f712d 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c @@ -46,62 +46,47 @@ #include #include #include +#include /* #define VERBOSE_SHOWREGS */ -/* - * Nothing special yet... - */ -void default_idle(void) -{ -} - - - -#ifndef CONFIG_SMP - -/* - * the idle loop on a Sparc... ;) - */ -void cpu_idle(void) +static void sparc64_yield(void) { - /* endless idle loop with no priority at all */ - for (;;) { - /* If current->work.need_resched is zero we should really - * setup for a system wakup event and execute a shutdown - * instruction. - * - * But this requires writing back the contents of the - * L2 cache etc. so implement this later. -DaveM - */ - while (!need_resched()) - barrier(); + if (tlb_type != hypervisor) + return; - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - check_pgt_cache(); + clear_thread_flag(TIF_POLLING_NRFLAG); + smp_mb__after_clear_bit(); + + while (!need_resched()) { + unsigned long pstate; + + /* Disable interrupts. */ + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "andn %0, %1, %0\n\t" + "wrpr %0, %%g0, %%pstate" + : "=&r" (pstate) + : "i" (PSTATE_IE)); + + if (!need_resched()) + sun4v_cpu_yield(); + + /* Re-enable interrupts. */ + __asm__ __volatile__( + "rdpr %%pstate, %0\n\t" + "or %0, %1, %0\n\t" + "wrpr %0, %%g0, %%pstate" + : "=&r" (pstate) + : "i" (PSTATE_IE)); } -} -#else + set_thread_flag(TIF_POLLING_NRFLAG); +} -/* - * the idle loop on a UltraMultiPenguin... - * - * TIF_POLLING_NRFLAG is set because we do not sleep the cpu - * inside of the idler task, so an interrupt is not needed - * to get a clean fast response. - * - * XXX Reverify this assumption... -DaveM - * - * Addendum: We do want it to do something for the signal - * delivery case, we detect that by just seeing - * if we are trying to send this to an idler or not. - */ +/* The idle loop on sparc64. */ void cpu_idle(void) { - cpuinfo_sparc *cpuinfo = &local_cpu_data(); set_thread_flag(TIF_POLLING_NRFLAG); while(1) { @@ -109,13 +94,11 @@ void cpu_idle(void) preempt_enable_no_resched(); schedule(); preempt_disable(); - check_pgt_cache(); } + sparc64_yield(); } } -#endif - extern char reboot_command []; extern void (*prom_palette)(int); -- cgit v1.2.3 From 9cc3a1ac9a819cadff05ca37bb7f208013a22035 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Tue, 21 Feb 2006 20:51:13 -0800 Subject: [SPARC64]: Make use of Niagara 256MB PTEs for kernel mappings. We use a bitmap, one bit for every 256MB of memory. If the bit is set we can use a 256MB PTE for linear mappings, else we have to use a 4MB PTE. SUN4V support is there, and we can very easily add support for Panther cpu 256MB PTEs in the future. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 29 ++++++++++++++-- arch/sparc64/mm/init.c | 82 ++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 96 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index 883180be3d5..ae1dac17bc8 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -133,8 +133,33 @@ kvmap_dtlb_4v: brgez,pn %g4, kvmap_dtlb_nonlinear nop - sethi %hi(kern_linear_pte_xor), %g2 - ldx [%g2 + %lo(kern_linear_pte_xor)], %g2 + sethi %hi(kpte_linear_bitmap), %g2 + or %g2, %lo(kpte_linear_bitmap), %g2 + + /* Clear the PAGE_OFFSET top virtual bits, then shift + * down to get a 256MB physical address index. + */ + sllx %g4, 21, %g5 + mov 1, %g7 + srlx %g5, 21 + 28, %g5 + + /* Don't try this at home kids... this depends upon srlx + * only taking the low 6 bits of the shift count in %g5. + */ + sllx %g7, %g5, %g7 + + /* Divide by 64 to get the offset into the bitmask. */ + srlx %g5, 6, %g5 + + /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ + ldx [%g2 + %g5], %g2 + andcc %g2, %g7, %g0 + sethi %hi(kern_linear_pte_xor), %g5 + or %g5, %lo(kern_linear_pte_xor), %g5 + bne,a,pt %xcc, 1f + add %g5, 8, %g5 + +1: ldx [%g5], %g2 .globl kvmap_linear_patch kvmap_linear_patch: diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index c7aa4404edc..b5869f00d2d 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -45,6 +45,19 @@ extern void device_scan(void); +#define MAX_PHYS_ADDRESS (1UL << 42UL) +#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) +#define KPTE_BITMAP_BYTES \ + ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) + +unsigned long kern_linear_pte_xor[2] __read_mostly; + +/* A bitmap, one bit for every 256MB of physical memory. If the bit + * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else + * if set we should use a 256MB page (via kern_linear_pte_xor[1]). + */ +unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -119,7 +132,6 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; -unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". 
*/ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -878,6 +890,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) return end_pfn; } +static struct linux_prom64_registers pall[MAX_BANKS] __initdata; +static int pall_ents __initdata; + #ifdef CONFIG_DEBUG_PAGEALLOC static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) { @@ -933,14 +948,41 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, return alloc_bytes; } -static struct linux_prom64_registers pall[MAX_BANKS] __initdata; -static int pall_ents __initdata; - extern unsigned int kvmap_linear_patch[1]; +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) +{ + const unsigned long shift_256MB = 28; + const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); + const unsigned long size_256MB = (1UL << shift_256MB); + + while (start < end) { + long remains; + + if (start & mask_256MB) { + start = (start + size_256MB) & ~mask_256MB; + continue; + } + + remains = end - start; + while (remains >= size_256MB) { + unsigned long index = start >> shift_256MB; + + __set_bit(index, kpte_linear_bitmap); + + start += size_256MB; + remains -= size_256MB; + } + } +} static void __init kernel_physical_mapping_init(void) { - unsigned long i, mem_alloced = 0UL; + unsigned long i; +#ifdef CONFIG_DEBUG_PAGEALLOC + unsigned long mem_alloced = 0UL; +#endif read_obp_memory("reg", &pall[0], &pall_ents); @@ -949,10 +991,16 @@ static void __init kernel_physical_mapping_init(void) phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; + + mark_kpte_bitmap(phys_start, phys_end); + +#ifdef CONFIG_DEBUG_PAGEALLOC mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); @@ -960,8 +1008,10 @@ static void __init kernel_physical_mapping_init(void) flushi(&kvmap_linear_patch[0]); __flush_tlb_all(); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; @@ -1172,9 +1222,7 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -#ifdef CONFIG_DEBUG_PAGEALLOC kernel_physical_mapping_init(); -#endif { unsigned long zones_size[MAX_NR_ZONES]; @@ -1413,10 +1461,13 @@ static void __init sun4u_pgprot_init(void) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | - _PAGE_P_4U | _PAGE_W_4U); + kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + /* XXX Should use 256MB on Panther. 
XXX */ + kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | @@ -1454,10 +1505,15 @@ static void __init sun4v_pgprot_init(void) _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = _PAGE_CACHE_4V; - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); -- cgit v1.2.3 From d7744a09504d5ae84edc8289a02254e1f2102410 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 22:31:11 -0800 Subject: [SPARC64]: Create a seperate kernel TSB for 4MB/256MB mappings. It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less ram. In such cases, on SUN4V, once we load up this TSB the first time with all the mappings, we never take a linear kernel mapping TLB miss ever again, the hypervisor handles them all. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 15 ++++++++++++++- arch/sparc64/mm/init.c | 24 +++++++++++++++++++----- 2 files changed, 33 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index ae1dac17bc8..efcf38b6e28 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -121,6 +121,12 @@ kvmap_dtlb_obp: nop .align 32 +kvmap_dtlb_tsb4m_load: + KTSB_LOCK_TAG(%g1, %g2, %g7) + KTSB_WRITE(%g1, %g5, %g6) + ba,pt %xcc, kvmap_dtlb_load + nop + kvmap_dtlb: /* %g6: TAG TARGET */ mov TLB_TAG_ACCESS, %g4 @@ -133,6 +139,13 @@ kvmap_dtlb_4v: brgez,pn %g4, kvmap_dtlb_nonlinear nop + /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ + KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) + + /* TSB entry address left in %g1, lookup linear PTE. + * Must preserve %g1 and %g6 (TAG). + */ +kvmap_dtlb_tsb4m_miss: sethi %hi(kpte_linear_bitmap), %g2 or %g2, %lo(kpte_linear_bitmap), %g2 @@ -163,7 +176,7 @@ kvmap_dtlb_4v: .globl kvmap_linear_patch kvmap_linear_patch: - ba,pt %xcc, kvmap_dtlb_load + ba,pt %xcc, kvmap_dtlb_tsb4m_load xor %g2, %g4, %g5 kvmap_dtlb_vmalloc_addr: diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b5869f00d2d..2a123135b04 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly; */ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; +/* A special kernel TSB for 4MB and 256MB linear mappings. */ +struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; + /* First KTSB for PAGE_SIZE mappings. */ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { @@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void) ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; - /* XXX When we have a kernel large page size TSB, describe - * XXX it in ktsb_descr[1] here. - */ + /* Second KTSB for 4MB/256MB mappings. 
*/ + ktsb_pa = (kern_base + + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); + + ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | + HV_PGSZ_MASK_256MB); + ktsb_descr[1].assoc = 1; + ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; + ktsb_descr[1].ctx_idx = 0; + ktsb_descr[1].tsb_base = ktsb_pa; + ktsb_descr[1].resv = 0; } void __cpuinit sun4v_ktsb_register(void) @@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void) pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); func = HV_FAST_MMU_TSB_CTX0; - /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ - arg0 = 1; + arg0 = 2; arg1 = pa; __asm__ __volatile__("ta %6" : "=&r" (func), "=&r" (arg0), "=&r" (arg1) @@ -1160,7 +1172,9 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + /* Invalidate both kernel TSBs. */ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); if (tlb_type == hypervisor) sun4v_pgprot_init(); -- cgit v1.2.3 From fc504928677049f0ad3f1fd4e0bb3908172df8f3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 22 Feb 2006 16:15:45 -0800 Subject: [SPARC64]: Drop %gl to 0 before re-enabling PSTATE_IE in rtrap If we take a window fault, on SUN4V set %gl to zero before we turn PSTATE_IE back on in %pstate. Otherwise if we take an interrupt we'll end up with corrupt register state. Signed-off-by: David S. Miller --- arch/sparc64/kernel/rtrap.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index 1e724fe172a..7130e866f93 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -333,7 +333,6 @@ user_rtt_fill_fixup: mov %g6, %l1 wrpr %g0, 0x0, %tl - wrpr %g0, RTRAP_PSTATE, %pstate 661: nop .section .sun4v_1insn_patch, "ax" @@ -341,6 +340,8 @@ user_rtt_fill_fixup: SET_GL(0) .previous + wrpr %g0, RTRAP_PSTATE, %pstate + mov %l1, %g6 ldx [%g6 + TI_TASK], %g4 LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) -- cgit v1.2.3 From b2bef4424cb4522f53e34d98d3deb0916478338b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 01:55:55 -0800 Subject: [SPARC64]: Export _PAGE_E and _PAGE_CACHE to modules. SBUS flash driver needs it. Noticed by Fabbione. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2a123135b04..16f0db38d93 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1423,8 +1423,12 @@ pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; + unsigned long _PAGE_E __read_mostly; +EXPORT_SYMBOL(_PAGE_E); + unsigned long _PAGE_CACHE __read_mostly; +EXPORT_SYMBOL(_PAGE_CACHE); static void prot_init_common(unsigned long page_none, unsigned long page_shared, -- cgit v1.2.3 From 074d82cf688fe2dfa7ba4a2317c56f62d13fb522 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 02:28:25 -0800 Subject: [SPARC64]: Put syscall tables after trap table. Otherwise with too much stuff enabled in the kernel config we can end up with an unaligned trap table. Signed-off-by: David S. 
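(For context on the alignment constraint: the full V9 trap table is 512 vectors * 32 bytes * 2 trap levels = 32KB, and %tba only stores bits 63:15 of its base, so the table must sit on a 32KB boundary; linking the large syscall tables ahead of it is what risked pushing it off that alignment.)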
Miller --- arch/sparc64/kernel/head.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 8c6c4694be9..3eadac5e171 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -622,7 +622,6 @@ setup_tba: restore sparc64_boot_end: -#include "systbls.S" #include "ktlb.S" #include "tsb.S" #include "etrap.S" @@ -650,6 +649,8 @@ swapper_tsb: #include "ttable.S" +#include "systbls.S" + .data .align 8 .globl prom_tba, tlb_type -- cgit v1.2.3 From a0663a79ad4faebe1db4a56e2e767b120b12333a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 14:19:28 -0800 Subject: [SPARC64]: Fix TLB context allocation with SMT style shared TLBs. The context allocation scheme we use depends upon there being a 1<-->1 mapping from cpu to physical TLB for correctness. Chips like Niagara break this assumption. So what we do is notify all cpus with a cross call when the context version number changes, and if necessary this makes them allocate a valid context for the address space they are running at the time. Stress tested with make -j1024, make -j2048, and make -j4096 kernel builds on a 32-strand, 8 core, T2000 with 16GB of ram. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 40 +++++++++++++++++++++++++++++----------- arch/sparc64/mm/init.c | 9 ++++++++- 2 files changed, 37 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 0cd9b16612e..1ce94081149 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -885,26 +885,44 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) put_cpu(); } +static void __smp_receive_signal_mask(cpumask_t mask) +{ + smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask); +} + void smp_receive_signal(int cpu) { cpumask_t mask = cpumask_of_cpu(cpu); - if (cpu_online(cpu)) { - u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff); - - if (tlb_type == spitfire) - spitfire_xcall_deliver(data0, 0, 0, mask); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_xcall_deliver(data0, 0, 0, mask); - else if (tlb_type == hypervisor) - hypervisor_xcall_deliver(data0, 0, 0, mask); - } + if (cpu_online(cpu)) + __smp_receive_signal_mask(mask); } void smp_receive_signal_client(int irq, struct pt_regs *regs) { - /* Just return, rtrap takes care of the rest. */ + struct mm_struct *mm; + clear_softint(1 << irq); + + /* See if we need to allocate a new TLB context because + * the version of the one we are using is now out of date. + */ + mm = current->active_mm; + if (likely(mm)) { + if (unlikely(!CTX_VALID(mm->context))) { + unsigned long flags; + + spin_lock_irqsave(&mm->context.lock, flags); + get_new_mmu_context(mm); + load_secondary_context(mm); + spin_unlock_irqrestore(&mm->context.lock, flags); + } + } +} + +void smp_new_mmu_context_version(void) +{ + __smp_receive_signal_mask(cpu_online_map); } void smp_report_regs(void) diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 16f0db38d93..ccf083aecb6 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end) * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). + * + * Always invoked with interrupts disabled. 
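For reference, the version scheme works roughly as follows (a schematic, simplified from the real mmu_context.h definitions): a context value packs an allocation generation above CTX_NR_BITS and a hardware context number below it.

    #define CTX_NR_MASK      ((1UL << CTX_NR_BITS) - 1UL)
    #define CTX_VERSION_MASK (~0UL << CTX_NR_BITS)

    /* Stale once the global generation in tlb_context_cache moves on. */
    #define CTX_VALID(ctx) \
        (!(((ctx).sparc64_ctx_val ^ tlb_context_cache) & CTX_VERSION_MASK))

After a wrap, the cross-call above lets every cpu notice that CTX_VALID() has gone false and allocate a fresh context before returning to user mode.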
*/ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; - + int new_version; spin_lock(&ctx_alloc_lock); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); + new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { @@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm) mmu_context_bmap[i + 2] = 0; mmu_context_bmap[i + 3] = 0; } + new_version = 1; goto out; } } @@ -671,6 +675,9 @@ out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; spin_unlock(&ctx_alloc_lock); + + if (unlikely(new_version)) + smp_new_mmu_context_version(); } void sparc_ultra_dump_itlb(void) -- cgit v1.2.3 From 77b838fa1ef0ab02f75afc09834c60d87b86772f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 21:40:15 -0800 Subject: [SPARC64]: destroy_context() needs to disable interrupts. get_new_mmu_context() can be invoked from interrupt context now for the new SMP version wrap handling. So disable interrupt while taking ctx_alloc_lock in destroy_context() so we don't deadlock. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 353cb060561..534ac281989 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -373,6 +373,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) void destroy_context(struct mm_struct *mm) { unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb); + unsigned long flags; free_pages((unsigned long) mm->context.tsb, get_order(size)); @@ -383,12 +384,12 @@ void destroy_context(struct mm_struct *mm) mm->context.tsb = NULL; mm->context.tsb_reg_val = 0UL; - spin_lock(&ctx_alloc_lock); + spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { unsigned long nr = CTX_NRBITS(mm->context); mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); } - spin_unlock(&ctx_alloc_lock); + spin_unlock_irqrestore(&ctx_alloc_lock, flags); } -- cgit v1.2.3 From c4e9249b1924118693f298ee8d38f7fe43587af3 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 24 Feb 2006 13:21:18 -0800 Subject: [SPARC64]: Fix binfmt_aout32.c build. Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- arch/sparc64/kernel/binfmt_aout32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index 181c8cdf954..cb9ecd0172c 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c @@ -330,7 +330,7 @@ beyond_if: current->mm->start_stack = (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); - tsb_context_switch(mm); + tsb_context_switch(current->mm); start_thread32(regs, ex.a_entry, current->mm->start_stack); if (current->ptrace & PT_PTRACED) -- cgit v1.2.3 From 36344762396ca868d6076c41a84bda25f1ed9d3c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 25 Feb 2006 17:16:29 -0800 Subject: [SPARC64]: Niagara optimized XOR functions for RAID. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/sparc64_ksyms.c | 13 ++ arch/sparc64/lib/xor.S | 300 +++++++++++++++++++++++++++++++++++- 2 files changed, 312 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 801fc0ce484..e87fe7dfc7d 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -108,6 +108,14 @@ extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); +extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); +extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, + unsigned long *); +extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); +extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); + /* Per-CPU information table */ EXPORT_PER_CPU_SYMBOL(__cpu_data); @@ -388,4 +396,9 @@ EXPORT_SYMBOL(xor_vis_3); EXPORT_SYMBOL(xor_vis_4); EXPORT_SYMBOL(xor_vis_5); +EXPORT_SYMBOL(xor_niagara_2); +EXPORT_SYMBOL(xor_niagara_3); +EXPORT_SYMBOL(xor_niagara_4); +EXPORT_SYMBOL(xor_niagara_5); + EXPORT_SYMBOL(prom_palette); diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S index 4cd5d2be1ae..a79c8888170 100644 --- a/arch/sparc64/lib/xor.S +++ b/arch/sparc64/lib/xor.S @@ -2,9 +2,10 @@ * arch/sparc64/lib/xor.S * * High speed xor_block operation for RAID4/5 utilizing the - * UltraSparc Visual Instruction Set. + * UltraSparc Visual Instruction Set and Niagara store-init/twin-load. * * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz) + * Copyright (C) 2006 David S. Miller */ #include @@ -19,6 +20,8 @@ */ .text .align 32 + + /* VIS versions. */ .globl xor_vis_2 .type xor_vis_2,#function xor_vis_2: @@ -352,3 +355,298 @@ xor_vis_5: ret restore .size xor_vis_5, .-xor_vis_5 + + /* Niagara versions. 
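Functionally, each Niagara routine below is nothing more than a 64-bit-wise XOR of the sources into the destination; all of the interest is in the ASIs (block-init stores sidestep the read-for-ownership a normal store incurs, and the quad/twin loads pull 16 bytes per instruction). The two-source variant reduced to plain C, for readability only (the C version would lose exactly those cache tricks):

    static void xor_2_in_c(unsigned long bytes, unsigned long *dest,
                           unsigned long *src)
    {
        unsigned long n = bytes / sizeof(unsigned long);

        while (n--)
            *dest++ ^= *src++;
    }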
*/ + .globl xor_niagara_2 + .type xor_niagara_2,#function +xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src + 0x00 */ + ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src + 0x10 */ + ldda [%i1 + 0x20] %asi, %g2 /* %g2/%g3 = src + 0x20 */ + ldda [%i1 + 0x30] %asi, %l0 /* %l0/%l1 = src + 0x30 */ + prefetch [%i1 + 0x40], #one_read + ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ + ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ + ldda [%i0 + 0x20] %asi, %o4 /* %o4/%o5 = dest + 0x20 */ + ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ + prefetch [%i0 + 0x40], #n_writes + xor %o0, %i2, %o0 + xor %o1, %i3, %o1 + stxa %o0, [%i0 + 0x00] %asi + stxa %o1, [%i0 + 0x08] %asi + xor %o2, %i4, %o2 + xor %o3, %i5, %o3 + stxa %o2, [%i0 + 0x10] %asi + stxa %o3, [%i0 + 0x18] %asi + xor %o4, %g2, %o4 + xor %o5, %g3, %o5 + stxa %o4, [%i0 + 0x20] %asi + stxa %o5, [%i0 + 0x28] %asi + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x30] %asi + stxa %l3, [%i0 + 0x38] %asi + add %i0, 0x40, %i0 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %i1, 0x40, %i1 + membar #Sync + wr %g7, 0x0, %asi + ret + restore + .size xor_niagara_2, .-xor_niagara_2 + + .globl xor_niagara_3 + .type xor_niagara_3,#function +xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%i1 + 0x10] %asi, %i4 /* %i4/%i5 = src1 + 0x10 */ + ldda [%l7 + 0x00] %asi, %g2 /* %g2/%g3 = src2 + 0x00 */ + ldda [%l7 + 0x10] %asi, %l0 /* %l0/%l1 = src2 + 0x10 */ + ldda [%i0 + 0x00] %asi, %o0 /* %o0/%o1 = dest + 0x00 */ + ldda [%i0 + 0x10] %asi, %o2 /* %o2/%o3 = dest + 0x10 */ + xor %g2, %i2, %g2 + xor %g3, %i3, %g3 + xor %o0, %g2, %o0 + xor %o1, %g3, %o1 + stxa %o0, [%i0 + 0x00] %asi + stxa %o1, [%i0 + 0x08] %asi + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + ldda [%l7 + 0x20] %asi, %g2 /* %g2/%g3 = src2 + 0x20 */ + ldda [%i0 + 0x20] %asi, %o0 /* %o0/%o1 = dest + 0x20 */ + xor %l0, %i4, %l0 + xor %l1, %i5, %l1 + xor %o2, %l0, %o2 + xor %o3, %l1, %o3 + stxa %o2, [%i0 + 0x10] %asi + stxa %o3, [%i0 + 0x18] %asi + ldda [%i1 + 0x30] %asi, %i4 /* %i4/%i5 = src1 + 0x30 */ + ldda [%l7 + 0x30] %asi, %l0 /* %l0/%l1 = src2 + 0x30 */ + ldda [%i0 + 0x30] %asi, %o2 /* %o2/%o3 = dest + 0x30 */ + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + xor %g2, %i2, %g2 + xor %g3, %i3, %g3 + xor %o0, %g2, %o0 + xor %o1, %g3, %o1 + stxa %o0, [%i0 + 0x20] %asi + stxa %o1, [%i0 + 0x28] %asi + xor %l0, %i4, %l0 + xor %l1, %i5, %l1 + xor %o2, %l0, %o2 + xor %o3, %l1, %o3 + stxa %o2, [%i0 + 0x30] %asi + stxa %o3, [%i0 + 0x38] %asi + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l7, 0x40, %l7 + membar #Sync + wr %g7, 0x0, %asi + ret + restore + .size xor_niagara_3, .-xor_niagara_3 + + .globl xor_niagara_4 + .type xor_niagara_4,#function +xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + prefetch [%i4], #one_read 
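An aside for readers who do not speak SPARC assembler: in C terms, each xor_niagara_N body folds its sources into the destination one 64-byte L2 line per loop iteration, as in the sketch below. The names here are hypothetical and this is not part of the patch; the real routines use 16-byte twin loads (ldda) and block-init stores through ASI_BLK_INIT_QUAD_LDD_P so destination lines are initialized without being read-allocated first.

    /* Sketch only: C-level equivalent of the xor_niagara_4 loop.
     * The 2/3/5 variants differ only in the number of sources.
     */
    static void xor_niagara_4_sketch(unsigned long bytes, unsigned long *dest,
                                     unsigned long *src1, unsigned long *src2,
                                     unsigned long *src3)
    {
            unsigned long lines = bytes / 64;       /* srlx %i0, 6, %g1 */

            while (lines--) {
                    int i;

                    /* Eight 64-bit words per 64-byte line. */
                    for (i = 0; i < 8; i++)
                            dest[i] ^= src1[i] ^ src2[i] ^ src3[i];
                    dest += 8;
                    src1 += 8;
                    src2 += 8;
                    src3 += 8;
            }
    }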
+ rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 + mov %i4, %l6 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ + ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ + ldda [%i0 + 0x00] %asi, %l0 /* %l0/%l1 = dest + 0x00 */ + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x00] %asi + stxa %l1, [%i0 + 0x08] %asi + ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ + ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x10] %asi + stxa %l1, [%i0 + 0x18] %asi + ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ + ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x20] %asi + stxa %l1, [%i0 + 0x28] %asi + ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ + ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ + + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%l6 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + stxa %l0, [%i0 + 0x30] %asi + stxa %l1, [%i0 + 0x38] %asi + + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + add %l7, 0x40, %l7 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l6, 0x40, %l6 + membar #Sync + wr %g7, 0x0, %asi + ret + restore + .size xor_niagara_4, .-xor_niagara_4 + + .globl xor_niagara_5 + .type xor_niagara_5,#function +xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ + save %sp, -192, %sp + prefetch [%i1], #n_writes + prefetch [%i2], #one_read + prefetch [%i3], #one_read + prefetch [%i4], #one_read + prefetch [%i5], #one_read + rd %asi, %g7 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + srlx %i0, 6, %g1 + mov %i1, %i0 + mov %i2, %i1 + mov %i3, %l7 + mov %i4, %l6 + mov %i5, %l5 +1: ldda [%i1 + 0x00] %asi, %i2 /* %i2/%i3 = src1 + 0x00 */ + ldda [%l7 + 0x00] %asi, %i4 /* %i4/%i5 = src2 + 0x00 */ + ldda [%l6 + 0x00] %asi, %g2 /* %g2/%g3 = src3 + 0x00 */ + ldda [%l5 + 0x00] %asi, %l0 /* %l0/%l1 = src4 + 0x00 */ + ldda [%i0 + 0x00] %asi, %l2 /* %l2/%l3 = dest + 0x00 */ + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x00] %asi + stxa %l3, [%i0 + 0x08] %asi + ldda [%l5 + 0x10] %asi, %l0 /* %l0/%l1 = src4 + 0x10 */ + ldda [%i0 + 0x10] %asi, %l2 /* %l2/%l3 = dest + 0x10 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 
0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x10] %asi + stxa %l3, [%i0 + 0x18] %asi + ldda [%l5 + 0x20] %asi, %l0 /* %l0/%l1 = src4 + 0x20 */ + ldda [%i0 + 0x20] %asi, %l2 /* %l2/%l3 = dest + 0x20 */ + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x20] %asi + stxa %l3, [%i0 + 0x28] %asi + ldda [%l5 + 0x30] %asi, %l0 /* %l0/%l1 = src4 + 0x30 */ + ldda [%i0 + 0x30] %asi, %l2 /* %l2/%l3 = dest + 0x30 */ + + prefetch [%i1 + 0x40], #one_read + prefetch [%l7 + 0x40], #one_read + prefetch [%l6 + 0x40], #one_read + prefetch [%l5 + 0x40], #one_read + prefetch [%i0 + 0x40], #n_writes + + xor %i4, %i2, %i4 + xor %i5, %i3, %i5 + xor %g2, %i4, %g2 + xor %g3, %i5, %g3 + xor %l0, %g2, %l0 + xor %l1, %g3, %l1 + xor %l2, %l0, %l2 + xor %l3, %l1, %l3 + stxa %l2, [%i0 + 0x30] %asi + stxa %l3, [%i0 + 0x38] %asi + + add %i0, 0x40, %i0 + add %i1, 0x40, %i1 + add %l7, 0x40, %l7 + add %l6, 0x40, %l6 + subcc %g1, 1, %g1 + bne,pt %xcc, 1b + add %l5, 0x40, %l5 + membar #Sync + wr %g7, 0x0, %asi + ret + restore + .size xor_niagara_5, .-xor_niagara_5 -- cgit v1.2.3 From 6cc80cfab8b2ce1919ad5862a43f6b7bcf163c80 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 14:51:29 -0800 Subject: [SPARC64]: Report mondo error correctly in hypervisor_xcall_deliver(). It's in "arg0" not "func". Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 1ce94081149..5ff2483e70b 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -617,7 +617,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t if (unlikely(++retries > 100)) { printk("CPU[%d]: sun4v mondo error %lu\n", - this_cpu, func); + this_cpu, arg0); break; } -- cgit v1.2.3 From 2a3a5f5ddbefde498e87f10924d4bf741c5bf37f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 19:31:49 -0800 Subject: [SPARC64]: Bulletproof hypervisor TLB flushing. Check TLB flush hypervisor calls for errors and report them. Pass HV_MMU_ALL always for now, we can add back the optimization to avoid the I-TLB flush later. Always explicitly page align the virtual address arguments. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/traps.c | 12 ++++++ arch/sparc64/mm/ultra.S | 94 ++++++++++++++++++++++++++++++++------------- 2 files changed, 80 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 8df0cf29e3e..043a72658f6 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -1968,6 +1968,18 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) prom_halt(); } +void hypervisor_tlbop_error(unsigned long err, unsigned long op) +{ + printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n", + err, op); +} + +void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) +{ + printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n", + err, op); +} + void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 725f8b34af4..bd8b0b4f878 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -257,17 +257,27 @@ __cheetah_flush_dcache_page: /* 11 insns */ #endif /* DCACHE_ALIASING_POSSIBLE */ /* Hypervisor specific versions, patched at boot time. */ -__hypervisor_flush_tlb_mm: /* 8 insns */ +__hypervisor_tlb_tl0_error: + save %sp, -192, %sp + mov %i0, %o0 + call hypervisor_tlbop_error + mov %i1, %o1 + ret + restore + +__hypervisor_flush_tlb_mm: /* 10 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop -__hypervisor_flush_tlb_pending: /* 15 insns */ +__hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 @@ -275,17 +285,18 @@ __hypervisor_flush_tlb_pending: /* 15 insns */ 1: sub %g1, (1 << 3), %g1 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ mov %g3, %o1 /* ARG1: mmu context */ - mov HV_MMU_DMMU, %o2 - andcc %o0, 1, %g0 - movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ - andn %o0, 1, %o0 + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop -__hypervisor_flush_tlb_kernel_range: /* 14 insns */ +__hypervisor_flush_tlb_kernel_range: /* 16 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f @@ -297,6 +308,8 @@ __hypervisor_flush_tlb_kernel_range: /* 14 insns */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl @@ -369,7 +382,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: /* 18 insns */ +xcall_flush_tlb_mm: /* 21 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -388,9 +401,12 @@ xcall_flush_tlb_mm: /* 18 insns */ nop nop nop + nop + nop + nop .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 20 insns */ +xcall_flush_tlb_pending: /* 21 insns */ /* %g5=context, %g1=nr, %g7=vaddrs[] */ sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 @@ -413,9 +429,10 @@ xcall_flush_tlb_pending: /* 20 insns */ nop stxa %g2, [%g4] ASI_DMMU retry + nop .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: /* 22 insns */ 
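A note on the "NN insns" bookkeeping this patch keeps adjusting: at boot, hypervisor_patch_cachetlbops copies exactly that many instruction words from each sun4v variant over the generic routine, via the tlb_patch_one calls whose mov NN, %o2 arguments change further down. Roughly, in C terms, the patcher does the following; this is a sketch of its effect, not a drop-in implementation:

    static void tlb_patch_sketch(unsigned int *dest, unsigned int *src,
                                 unsigned int count)
    {
            while (count--) {
                    *dest = *src++;
                    /* Keep the I-cache coherent with the stored word. */
                    __asm__ __volatile__("flush %0" : : "r" (dest));
                    dest++;
            }
    }

Undercounting would leave stale tail instructions of the old routine behind; the nops added to the xcall versions pad them out so the patched-over area is always at least as large as its replacement.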
+xcall_flush_tlb_kernel_range: /* 25 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -438,6 +455,9 @@ xcall_flush_tlb_kernel_range: /* 22 insns */ nop nop nop + nop + nop + nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. @@ -545,8 +565,21 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop + /* %g5: error + * %g6: tlb op + */ +__hypervisor_tlb_xcall_error: + mov %g5, %g4 + mov %g6, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o0 + call hypervisor_tlbop_error_xcall + mov %l5, %o1 + ba,a,pt %xcc, rtrap_clr_l6 + .globl __hypervisor_xcall_flush_tlb_mm -__hypervisor_xcall_flush_tlb_mm: /* 18 insns */ +__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 @@ -559,6 +592,9 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP + mov HV_FAST_MMU_DEMAP_CTX, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -568,8 +604,8 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */ retry .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 18 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */ +__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ sllx %g1, 3, %g1 mov %o0, %g2 mov %o1, %g3 @@ -577,10 +613,13 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */ 1: sub %g1, (1 << 3), %g1 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ - mov HV_MMU_DMMU, %o2 - andcc %o0, 1, %g0 - movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,a,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 brnz,pt %g1, 1b nop mov %g2, %o0 @@ -590,8 +629,8 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */ retry .globl __hypervisor_xcall_flush_tlb_kernel_range -__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ - /* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */ +__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ + /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -601,17 +640,20 @@ __hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 - mov %o2, %g5 + mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 mov %g2, %o0 mov %g4, %o1 - mov %g5, %o2 + mov %g7, %o2 membar #Sync retry @@ -643,21 +685,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 call tlb_patch_one - mov 8, %o2 + mov 10, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one - mov 15, %o2 + mov 16, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi 
%hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 14, %o2 + mov 16, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 @@ -674,21 +716,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one - mov 18, %o2 + mov 21, %o2 sethi %hi(xcall_flush_tlb_pending), %o0 or %o0, %lo(xcall_flush_tlb_pending), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 call tlb_patch_one - mov 18, %o2 + mov 21, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 22, %o2 + mov 25, %o2 #endif /* CONFIG_SMP */ ret -- cgit v1.2.3 From 7a591cfe4efef8a232e4938d44ae6693b319f6d7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 19:44:50 -0800 Subject: [SPARC64]: Avoid dcache-dirty page state management on sun4v. It is totally wasted work, since we have no D-cache aliasing issues on sun4v. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 50 ++++++++++++++++++++++++++++---------------------- arch/sparc64/mm/tlb.c | 3 ++- 2 files changed, 30 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ccf083aecb6..87d5d1af1ad 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -188,8 +188,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif -__inline__ void flush_dcache_page_impl(struct page *page) +inline void flush_dcache_page_impl(struct page *page) { + BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif @@ -279,29 +280,31 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; - struct page *page; - unsigned long pfn; - unsigned long pg_flags; - - pfn = pte_pfn(pte); - if (pfn_valid(pfn) && - (page = pfn_to_page(pfn), page_mapping(page)) && - ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { - int cpu = ((pg_flags >> PG_dcache_cpu_shift) & - PG_dcache_cpu_mask); - int this_cpu = get_cpu(); - - /* This is just to optimize away some function calls - * in the SMP case. - */ - if (cpu == this_cpu) - flush_dcache_page_impl(page); - else - smp_flush_dcache_page_impl(page, cpu); - clear_dcache_dirty_cpu(page, cpu); + if (tlb_type != hypervisor) { + unsigned long pfn = pte_pfn(pte); + unsigned long pg_flags; + struct page *page; + + if (pfn_valid(pfn) && + (page = pfn_to_page(pfn), page_mapping(page)) && + ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { + int cpu = ((pg_flags >> PG_dcache_cpu_shift) & + PG_dcache_cpu_mask); + int this_cpu = get_cpu(); + + /* This is just to optimize away some function calls + * in the SMP case. + */ + if (cpu == this_cpu) + flush_dcache_page_impl(page); + else + smp_flush_dcache_page_impl(page, cpu); + + clear_dcache_dirty_cpu(page, cpu); - put_cpu(); + put_cpu(); + } } mm = vma->vm_mm; @@ -321,6 +324,9 @@ void flush_dcache_page(struct page *page) struct address_space *mapping; int this_cpu; + if (tlb_type == hypervisor) + return; + /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. 
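The shape of update_mmu_cache() after this change, condensed to its control flow; a skeleton, not the full function:

    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                          pte_t pte)
    {
            if (tlb_type != hypervisor) {
                    /* sun4u only: resolve any PG_dcache_dirty state for
                     * the page, flushing locally or via
                     * smp_flush_dcache_page_impl() on the owning cpu.
                     */
            }

            /* The TSB insert runs on both sun4u and sun4v. */
    }

sun4v has no D-cache aliasing, so the dirty tracking is pure overhead there; the same tlb_type test guards flush_dcache_page() above and tlb_batch_add() in the tlb.c hunk below.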
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 78357cc2a0b..a079cf42505 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c @@ -49,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t if (pte_exec(orig)) vaddr |= 0x1UL; - if (pte_dirty(orig)) { + if (tlb_type != hypervisor && + pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); struct address_space *mapping; struct page *page; -- cgit v1.2.3 From 6889331a1260e42b0275f42c13d6342d6cc1a03d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:09:37 -0800 Subject: [SPARC64]: Fix indexing into kpte_linear_bitmap. Need to shift back up by 3 bits to get 8-byte entry index. Signed-off-by: David S. Miller --- arch/sparc64/kernel/ktlb.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S index efcf38b6e28..31da1e564c9 100644 --- a/arch/sparc64/kernel/ktlb.S +++ b/arch/sparc64/kernel/ktlb.S @@ -163,6 +163,7 @@ kvmap_dtlb_tsb4m_miss: /* Divide by 64 to get the offset into the bitmask. */ srlx %g5, 6, %g5 + sllx %g5, 3, %g5 /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ ldx [%g2 + %g5], %g2 -- cgit v1.2.3 From aac0aadf09b98ba36eab0bb02a560ebcb82ac39f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 27 Feb 2006 17:56:51 -0800 Subject: [SPARC64]: Fix bugs in SMP TLB context version expiration handling. 1) We must flush the TLB, duh. 2) Even if the sw context was seen to be valid, the local cpu's hw context can be out of date, so reload it unconditionally. Signed-off-by: David S. Miller --- arch/sparc64/kernel/smp.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 5ff2483e70b..eb7c0f855ba 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -909,14 +909,18 @@ void smp_receive_signal_client(int irq, struct pt_regs *regs) */ mm = current->active_mm; if (likely(mm)) { - if (unlikely(!CTX_VALID(mm->context))) { - unsigned long flags; + unsigned long flags; - spin_lock_irqsave(&mm->context.lock, flags); + spin_lock_irqsave(&mm->context.lock, flags); + + if (unlikely(!CTX_VALID(mm->context))) get_new_mmu_context(mm); - load_secondary_context(mm); - spin_unlock_irqrestore(&mm->context.lock, flags); - } + + load_secondary_context(mm); + __flush_tlb_mm(CTX_HWBITS(mm->context), + SECONDARY_CONTEXT); + + spin_unlock_irqrestore(&mm->context.lock, flags); } } -- cgit v1.2.3 From b830ab665ad96c6b20d51a89b35cbc09ab5a2c29 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 28 Feb 2006 15:10:26 -0800 Subject: [SPARC64]: Fix bugs in SUN4V cpu mondo dispatch. There were several bugs in the SUN4V cpu mondo dispatch code. In fact, if we ever got a EWOULDBLOCK or other error from the hypervisor call, we'd potentially send a cpu mondo multiple times to the same cpu and even worse we could loop until the timeout resending the same mondo over and over to such cpus. So let's bulletproof this thing as follows: 1) Implement cpu_mondo_send() and cpu_state() hypervisor calls in arch/sparc64/kernel/entry.S, add prototypes to asm/hypervisor.h 2) Don't build and update the cpulist using inline functions, this was causing the cpu mask to not get updated in the caller. 3) Disable interrupts during the entire mondo send, otherwise our cpu list and/or mondo block could get overwritten if we take an interrupt and do a cpu mondo send on the current cpu. 
4) Check for all possible error return types from the cpu_mondo_send() hypervisor call. In particular: HV_EOK) Our work is done, all cpus have received the mondo. HV_CPUERROR) One or more of the cpus in the cpu list we passed to the hypervisor are in error state. Use cpu_state() calls over the entries in the cpu list to see which ones. Record them in "error_mask" and report this after we are done sending the mondo to cpus which are not in error state. HV_EWOULDBLOCK) We need to keep trying. Any other error we consider fatal, we report the event and exit immediately. 5) We only timeout if forward progress is not made. Forward progress is defined as having at least one cpu get the mondo successfully in a given cpu_mondo_send() call. Otherwise we bump a counter and delay a little. If the counter hits a limit, we signal an error and report the event. Also, smp_call_function_mask() error handling reports the number of cpus incorrectly. Signed-off-by: David S. Miller --- arch/sparc64/kernel/entry.S | 28 +++++++ arch/sparc64/kernel/smp.c | 180 ++++++++++++++++++++++++++++++-------------- 2 files changed, 151 insertions(+), 57 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index 9f3048e64e8..6d0b3ed77a0 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -1795,3 +1795,31 @@ sun4v_cpu_yield: ta HV_FAST_TRAP retl nop + + /* %o0: num cpus in cpu list + * %o1: cpu list paddr + * %o2: mondo block paddr + * + * returns %o0: status + */ + .globl sun4v_cpu_mondo_send +sun4v_cpu_mondo_send: + mov HV_FAST_CPU_MONDO_SEND, %o5 + ta HV_FAST_TRAP + retl + nop + + /* %o0: CPU ID + * + * returns %o0: -status if status non-zero, else + * %o0: cpu state as HV_CPU_STATE_* + */ + .globl sun4v_cpu_state +sun4v_cpu_state: + mov HV_FAST_CPU_STATE, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, 1f + sub %g0, %o0, %o0 + mov %o1, %o0 +1: retl + nop diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index eb7c0f855ba..6bc7fd47e44 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -556,77 +556,144 @@ retry: } /* Multi-cpu list version. */ -static int init_cpu_list(u16 *list, cpumask_t mask) -{ - int i, cnt; - - cnt = 0; - for_each_cpu_mask(i, mask) - list[cnt++] = i; - - return cnt; -} - -static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask) -{ - int i; - - for (i = 0; i < orig_cnt; i++) { - if (list[i] == 0xffff) - cpu_clear(i, mask); - } - - return init_cpu_list(list, mask); -} - static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) { - int this_cpu = get_cpu(); - struct trap_per_cpu *tb = &trap_block[this_cpu]; - u64 *mondo = __va(tb->cpu_mondo_block_pa); - u16 *cpu_list = __va(tb->cpu_list_pa); - int cnt, retries; + struct trap_per_cpu *tb; + u16 *cpu_list; + u64 *mondo; + cpumask_t error_mask; + unsigned long flags, status; + int cnt, retries, this_cpu, i; + + /* We have to do this whole thing with interrupts fully disabled. + * Otherwise if we send an xcall from interrupt context it will + * corrupt both our mondo block and cpu list state. + * + * One consequence of this is that we cannot use timeout mechanisms + * that depend upon interrupts being delivered locally. So, for + * example, we cannot sample jiffies and expect it to advance. + * + * Fortunately, udelay() uses %stick/%tick so we can use that. 
+ */ + local_irq_save(flags); + + this_cpu = smp_processor_id(); + tb = &trap_block[this_cpu]; + mondo = __va(tb->cpu_mondo_block_pa); mondo[0] = data0; mondo[1] = data1; mondo[2] = data2; wmb(); + cpu_list = __va(tb->cpu_list_pa); + + /* Setup the initial cpu list. */ + cnt = 0; + for_each_cpu_mask(i, mask) + cpu_list[cnt++] = i; + + cpus_clear(error_mask); retries = 0; - cnt = init_cpu_list(cpu_list, mask); do { - register unsigned long func __asm__("%o5"); - register unsigned long arg0 __asm__("%o0"); - register unsigned long arg1 __asm__("%o1"); - register unsigned long arg2 __asm__("%o2"); - - func = HV_FAST_CPU_MONDO_SEND; - arg0 = cnt; - arg1 = tb->cpu_list_pa; - arg2 = tb->cpu_mondo_block_pa; - - __asm__ __volatile__("ta %8" - : "=&r" (func), "=&r" (arg0), - "=&r" (arg1), "=&r" (arg2) - : "0" (func), "1" (arg0), - "2" (arg1), "3" (arg2), - "i" (HV_FAST_TRAP) - : "memory"); - if (likely(arg0 == HV_EOK)) - break; + int forward_progress; + + status = sun4v_cpu_mondo_send(cnt, + tb->cpu_list_pa, + tb->cpu_mondo_block_pa); - if (unlikely(++retries > 100)) { - printk("CPU[%d]: sun4v mondo error %lu\n", - this_cpu, arg0); + /* HV_EOK means all cpus received the xcall, we're done. */ + if (likely(status == HV_EOK)) break; + + /* First, clear out all the cpus in the mask that were + * successfully sent to. The hypervisor indicates this + * by setting the cpu list entry of such cpus to 0xffff. + */ + forward_progress = 0; + for (i = 0; i < cnt; i++) { + if (cpu_list[i] == 0xffff) { + cpu_clear(i, mask); + forward_progress = 1; + } } - cnt = update_cpu_list(cpu_list, cnt, mask); + /* If we get a HV_ECPUERROR, then one or more of the cpus + * in the list are in error state. Use the cpu_state() + * hypervisor call to find out which cpus are in error state. + */ + if (unlikely(status == HV_ECPUERROR)) { + for (i = 0; i < cnt; i++) { + long err; + u16 cpu; + + cpu = cpu_list[i]; + if (cpu == 0xffff) + continue; + + err = sun4v_cpu_state(cpu); + if (err >= 0 && + err == HV_CPU_STATE_ERROR) { + cpu_clear(cpu, mask); + cpu_set(cpu, error_mask); + } + } + } else if (unlikely(status != HV_EWOULDBLOCK)) + goto fatal_mondo_error; + + /* Rebuild the cpu_list[] array and try again. */ + cnt = 0; + for_each_cpu_mask(i, mask) + cpu_list[cnt++] = i; - udelay(2 * cnt); + if (unlikely(!forward_progress)) { + if (unlikely(++retries > 10000)) + goto fatal_mondo_timeout; + + /* Delay a little bit to let other cpus catch up + * on their cpu mondo queue work. 
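The retry policy here, distilled. In this sketch made_progress() is a hypothetical stand-in for the inline scan that counts newly 0xffff'd entries, and the HV_ECPUERROR and fatal-error paths are elided:

    static void mondo_retry_sketch(struct trap_per_cpu *tb, u16 *cpu_list,
                                   int cnt)
    {
            int retries = 0;

            for (;;) {
                    unsigned long status;

                    status = sun4v_cpu_mondo_send(cnt, tb->cpu_list_pa,
                                                  tb->cpu_mondo_block_pa);
                    if (status == HV_EOK)
                            break;                  /* every target got it */

                    /* Retire sent and errored cpus, rebuild the list;
                     * anything but HV_EWOULDBLOCK beyond that is fatal.
                     */
                    if (!made_progress(cpu_list, cnt)) {
                            if (++retries > 10000)
                                    break;          /* report the timeout */
                            udelay(2 * cnt);        /* let queues drain */
                    }
            }
    }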
+ */ + udelay(2 * cnt); + } } while (1); - put_cpu(); + local_irq_restore(flags); + + if (unlikely(!cpus_empty(error_mask))) + goto fatal_mondo_cpu_error; + + return; + +fatal_mondo_cpu_error: + printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " + "were in error state\n", + this_cpu); + printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); + for_each_cpu_mask(i, error_mask) + printk("%d ", i); + printk("]\n"); + return; + +fatal_mondo_timeout: + local_irq_restore(flags); + printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " + " progress after %d retries.\n", + this_cpu, retries); + goto dump_cpu_list_and_out; + +fatal_mondo_error: + local_irq_restore(flags); + printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", + this_cpu, status); + printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " + "mondo_block_pa(%lx)\n", + this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); + +dump_cpu_list_and_out: + printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); + for (i = 0; i < cnt; i++) + printk("%u ", cpu_list[i]); + printk("]\n"); } /* Send cross call to all processors mentioned in MASK @@ -723,9 +790,8 @@ static int smp_call_function_mask(void (*func)(void *info), void *info, out_timeout: spin_unlock(&call_lock); - printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n", - (long) num_online_cpus() - 1L, - (long) atomic_read(&data.finished)); + printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n", + cpus, atomic_read(&data.finished)); return 0; } -- cgit v1.2.3 From 8ba706a95bb92c3b14b812f6d507890336d19136 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Mar 2006 17:32:46 -0800 Subject: [SPARC64]: Add mini-RTC driver for Starfire and SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/kernel/time.c | 279 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 279 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index f6275adbc81..d50ebeae144 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -30,6 +30,8 @@ #include #include #include +#include +#include #include #include @@ -45,6 +47,7 @@ #include #include #include +#include DEFINE_SPINLOCK(mostek_lock); DEFINE_SPINLOCK(rtc_lock); @@ -702,6 +705,14 @@ static u32 starfire_get_time(void) return unix_tod; } +static int starfire_set_time(u32 val) +{ + /* Do nothing, time is set using the service processor + * console on this platform. + */ + return 0; +} + static u32 hypervisor_get_time(void) { register unsigned long func asm("%o5"); @@ -731,6 +742,33 @@ retry: return 0; } +static int hypervisor_set_time(u32 secs) +{ + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + int retries = 10000; + +retry: + func = HV_FAST_TOD_SET; + arg0 = secs; + __asm__ __volatile__("ta %4" + : "=&r" (func), "=&r" (arg0) + : "0" (func), "1" (arg0), + "i" (HV_FAST_TRAP)); + if (arg0 == HV_EOK) + return 0; + if (arg0 == HV_EWOULDBLOCK) { + if (--retries > 0) { + udelay(100); + goto retry; + } + printk(KERN_WARNING "SUN4V: tod_set() timed out.\n"); + return -EAGAIN; + } + printk(KERN_WARNING "SUN4V: tod_set() not supported.\n"); + return -EOPNOTSUPP; +} + void __init clock_probe(void) { struct linux_prom_registers clk_reg[2]; @@ -1221,3 +1259,244 @@ static int set_rtc_mmss(unsigned long nowtime) return retval; } } + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ +static unsigned char mini_rtc_status; /* bitmapped status byte. 
*/ + +/* months start at 0 now */ +static unsigned char days_in_mo[] = +{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +#define FEBRUARY 2 +#define STARTOFTIME 1970 +#define SECDAY 86400L +#define SECYR (SECDAY * 365) +#define leapyear(year) ((year) % 4 == 0 && \ + ((year) % 100 != 0 || (year) % 400 == 0)) +#define days_in_year(a) (leapyear(a) ? 366 : 365) +#define days_in_month(a) (month_days[(a) - 1]) + +static int month_days[12] = { + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 +}; + +/* + * This only works for the Gregorian calendar - i.e. after 1752 (in the UK) + */ +static void GregorianDay(struct rtc_time * tm) +{ + int leapsToDate; + int lastYear; + int day; + int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; + + lastYear = tm->tm_year - 1; + + /* + * Number of leap corrections to apply up to end of last year + */ + leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400; + + /* + * This year is a leap year if it is divisible by 4 except when it is + * divisible by 100 unless it is divisible by 400 + * + * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was + */ + day = tm->tm_mon > 2 && leapyear(tm->tm_year); + + day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + + tm->tm_mday; + + tm->tm_wday = day % 7; +} + +static void to_tm(int tim, struct rtc_time *tm) +{ + register int i; + register long hms, day; + + day = tim / SECDAY; + hms = tim % SECDAY; + + /* Hours, minutes, seconds are easy */ + tm->tm_hour = hms / 3600; + tm->tm_min = (hms % 3600) / 60; + tm->tm_sec = (hms % 3600) % 60; + + /* Number of years in days */ + for (i = STARTOFTIME; day >= days_in_year(i); i++) + day -= days_in_year(i); + tm->tm_year = i; + + /* Number of months in days left */ + if (leapyear(tm->tm_year)) + days_in_month(FEBRUARY) = 29; + for (i = 1; day >= days_in_month(i); i++) + day -= days_in_month(i); + days_in_month(FEBRUARY) = 28; + tm->tm_mon = i; + + /* Days are what is left over (+1) from all that. */ + tm->tm_mday = day + 1; + + /* + * Determine the day of week + */ + GregorianDay(tm); +} + +/* Both Starfire and SUN4V give us seconds since Jan 1st, 1970, + * aka Unix time. So we have to convert to/from rtc_time. + */ +static inline void mini_get_rtc_time(struct rtc_time *time) +{ + unsigned long flags; + u32 seconds; + + spin_lock_irqsave(&rtc_lock, flags); + seconds = 0; + if (this_is_starfire) + seconds = starfire_get_time(); + else if (tlb_type == hypervisor) + seconds = hypervisor_get_time(); + spin_unlock_irqrestore(&rtc_lock, flags); + + to_tm(seconds, time); +} + +static inline int mini_set_rtc_time(struct rtc_time *time) +{ + u32 seconds = mktime(time->tm_year + 1900, time->tm_mon + 1, + time->tm_mday, time->tm_hour, + time->tm_min, time->tm_sec); + unsigned long flags; + int err; + + spin_lock_irqsave(&rtc_lock, flags); + err = -ENODEV; + if (this_is_starfire) + err = starfire_set_time(seconds); + else if (tlb_type == hypervisor) + err = hypervisor_set_time(seconds); + spin_unlock_irqrestore(&rtc_lock, flags); + + return err; +} + +static int mini_rtc_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct rtc_time wtime; + void __user *argp = (void __user *)arg; + + switch (cmd) { + + case RTC_PLL_GET: + return -EINVAL; + + case RTC_PLL_SET: + return -EINVAL; + + case RTC_UIE_OFF: /* disable ints from RTC updates. */ + return 0; + + case RTC_UIE_ON: /* enable ints for RTC updates. 
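For reference, the userspace view of this ioctl surface: only RTC_RD_TIME and RTC_SET_TIME, handled just below, do real work on these machines. A self-contained test program against the standard RTC ABI:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>

    int main(void)
    {
            struct rtc_time tm;
            int fd = open("/dev/rtc", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, RTC_RD_TIME, &tm) == 0)
                    printf("%04d-%02d-%02d %02d:%02d:%02d\n",
                           tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                           tm.tm_hour, tm.tm_min, tm.tm_sec);
            close(fd);
            return 0;
    }

Note that tm_year counts from 1900 and tm_mon runs 0-11, which is exactly the convention a follow-up fix later in this series makes mini_get_rtc_time() honor.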
*/ + return -EINVAL; + + case RTC_RD_TIME: /* Read the time/date from RTC */ + /* this doesn't get week-day, who cares */ + memset(&wtime, 0, sizeof(wtime)); + mini_get_rtc_time(&wtime); + + return copy_to_user(argp, &wtime, sizeof(wtime)) ? -EFAULT : 0; + + case RTC_SET_TIME: /* Set the RTC */ + { + int year; + unsigned char leap_yr; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&wtime, argp, sizeof(wtime))) + return -EFAULT; + + year = wtime.tm_year + 1900; + leap_yr = ((!(year % 4) && (year % 100)) || + !(year % 400)); + + if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1)) + return -EINVAL; + + if (wtime.tm_mday < 0 || wtime.tm_mday > + (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr))) + return -EINVAL; + + if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 || + wtime.tm_min < 0 || wtime.tm_min >= 60 || + wtime.tm_sec < 0 || wtime.tm_sec >= 60) + return -EINVAL; + + return mini_set_rtc_time(&wtime); + } + } + + return -EINVAL; +} + +static int mini_rtc_open(struct inode *inode, struct file *file) +{ + if (mini_rtc_status & RTC_IS_OPEN) + return -EBUSY; + + mini_rtc_status |= RTC_IS_OPEN; + + return 0; +} + +static int mini_rtc_release(struct inode *inode, struct file *file) +{ + mini_rtc_status &= ~RTC_IS_OPEN; + return 0; +} + + +static struct file_operations mini_rtc_fops = { + .owner = THIS_MODULE, + .ioctl = mini_rtc_ioctl, + .open = mini_rtc_open, + .release = mini_rtc_release, +}; + +static struct miscdevice rtc_mini_dev = +{ + .minor = RTC_MINOR, + .name = "rtc", + .fops = &mini_rtc_fops, +}; + +static int __init rtc_mini_init(void) +{ + int retval; + + if (tlb_type != hypervisor && !this_is_starfire) + return -ENODEV; + + printk(KERN_INFO "Mini RTC Driver\n"); + + retval = misc_register(&rtc_mini_dev); + if (retval < 0) + return retval; + + return 0; +} + +static void __exit rtc_mini_exit(void) +{ + misc_deregister(&rtc_mini_dev); +} + + +module_init(rtc_mini_init); +module_exit(rtc_mini_exit); -- cgit v1.2.3 From 92daa77e9a829350fd3900ff58d9c69820ad0e3d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Mar 2006 22:27:09 -0800 Subject: [SPARC64]: Fix typo in SUN4V D-TLB miss handler. Should put FAULT_CODE_DTLB into %g3 not FAULT_CODE_ITLB. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 654244a3b04..3dccbd67818 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -107,7 +107,7 @@ sun4v_dtlb_miss: ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 cmp %g2, %g6 bne,a,pn %xcc, tsb_miss_page_table_walk - mov FAULT_CODE_ITLB, %g3 + mov FAULT_CODE_DTLB, %g3 /* We have a valid entry, make hypervisor call to load * D-TLB and return from trap. -- cgit v1.2.3 From 45f791eb0f03e760183d30d3f1f18dc2b8e902fe Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Mar 2006 22:42:18 -0800 Subject: [SPARC64]: Fix _PAGE_EXEC handling. First of all, use the known _PAGE_EXEC_{4U,4V} value instead of loading _PAGE_EXEC from memory. We either know which one to use by context, or we can code patch the test. Next, we need to check executability of a PTE in the generic TSB miss handler. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/itlb_miss.S | 10 +++++----- arch/sparc64/kernel/sun4v_tlb_miss.S | 4 +--- arch/sparc64/kernel/tsb.S | 9 +++++++++ 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S index 6dfe3968c37..ad46e2024f4 100644 --- a/arch/sparc64/kernel/itlb_miss.S +++ b/arch/sparc64/kernel/itlb_miss.S @@ -9,18 +9,18 @@ cmp %g4, %g6 ! Compare TAG /* ITLB ** ICACHE line 2: TSB compare and TLB load */ - sethi %hi(PAGE_EXEC), %g4 ! Setup exec check - ldx [%g4 + %lo(PAGE_EXEC)], %g4 bne,pn %xcc, tsb_miss_itlb ! Miss mov FAULT_CODE_ITLB, %g3 - andcc %g5, %g4, %g0 ! Executable? + andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? be,pn %xcc, tsb_do_fault nop ! Delay slot, fill me + stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB + retry ! Trap done nop /* ITLB ** ICACHE line 3: */ - stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB - retry ! Trap done + nop + nop nop nop nop diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 3dccbd67818..3eed8db9684 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -58,11 +58,9 @@ sun4v_itlb_miss: /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2 cmp %g2, %g6 - sethi %hi(PAGE_EXEC), %g7 - ldx [%g7 + %lo(PAGE_EXEC)], %g7 bne,a,pn %xcc, tsb_miss_page_table_walk mov FAULT_CODE_ITLB, %g3 - andcc %g3, %g7, %g0 + andcc %g3, _PAGE_EXEC_4V, %g0 be,a,pn %xcc, tsb_do_fault mov FAULT_CODE_ITLB, %g3 diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index cc225c0563c..563852bf359 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -103,6 +103,15 @@ tsb_dtlb_load: mov %g5, %g3 tsb_itlb_load: + /* Executable bit must be set. */ +661: andcc %g5, _PAGE_EXEC_4U, %g0 + .section .sun4v_1insn_patch, "ax" + .word 661b + andcc %g5, _PAGE_EXEC_4V, %g0 + .previous + + be,pn %xcc, tsb_do_fault + nop 661: stxa %g5, [%g0] ASI_ITLB_DATA_IN retry -- cgit v1.2.3 From 8bcd17411643beb9a601e032d0cf1016909a81d3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Mar 2006 18:12:27 -0800 Subject: [SPARC64]: Do not allow mapping pages within 4GB of 64-bit VA hole. The UltraSPARC T1 manual recommends this because the chip could instruction prefetch into the VA hole, and this would also make decoding certain kinds of memory access traps more difficult (because the chip sign extends certain pieces of trap state). Signed-off-by: David S. Miller --- arch/sparc64/kernel/sys_sparc.c | 90 +++++++++++++++++++++++++++++++---------- 1 file changed, 68 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 5f8c822a2b4..095db723bb8 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -33,14 +33,55 @@ /* #define DEBUG_UNIMP_SYSCALL */ -/* XXX Make this per-binary type, this way we can detect the type of - * XXX a binary. Every Sparc executable calls this very early on. - */ asmlinkage unsigned long sys_getpagesize(void) { return PAGE_SIZE; } +#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) +#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) + +/* Does addr --> addr+len fall within 4GB of the VA-space hole or + * overflow past the end of the 64-bit address space? 
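Concrete numbers make these checks easier to audit. The UltraSPARC VA hole spans [0x0000080000000000, 0xfffff80000000000); the constants above widen it by a 4GB guard band on each side:

    VA_EXCLUDE_START = 0x0000080000000000UL - (1UL << 32)
                     = 0x000007ff00000000UL
    VA_EXCLUDE_END   = 0xfffff80000000000UL + (1UL << 32)
                     = 0xfffff80100000000UL

So, for example, a MAP_FIXED request at addr = 0x000007fe00000000 with len = 8GB passes the first two tests below (len < VA_EXCLUDE_START, no wraparound) but is rejected by the overlap test, since addr + len = 0x0000080000000000 lands inside the widened window.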
+ */ +static inline int invalid_64bit_range(unsigned long addr, unsigned long len) +{ + unsigned long va_exclude_start, va_exclude_end; + + va_exclude_start = VA_EXCLUDE_START; + va_exclude_end = VA_EXCLUDE_END; + + if (unlikely(len >= va_exclude_start)) + return 1; + + if (unlikely((addr + len) < addr)) + return 1; + + if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) || + ((addr + len) >= va_exclude_start && + (addr + len) < va_exclude_end))) + return 1; + + return 0; +} + +/* Does start,end straddle the VA-space hole? */ +static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) +{ + unsigned long va_exclude_start, va_exclude_end; + + va_exclude_start = VA_EXCLUDE_START; + va_exclude_end = VA_EXCLUDE_END; + + if (likely(start < va_exclude_start && end < va_exclude_start)) + return 0; + + if (likely(start >= va_exclude_end && end >= va_exclude_end)) + return 0; + + return 1; +} + #define COLOUR_ALIGN(addr,pgoff) \ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ (((pgoff)< task_size || len > -PAGE_OFFSET) + if (len > task_size || len >= VA_EXCLUDE_START) return -ENOMEM; do_color_align = 0; @@ -100,9 +141,10 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ - if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) { - addr = PAGE_OFFSET; - vma = find_vma(mm, PAGE_OFFSET); + if (addr < VA_EXCLUDE_START && + (addr + len) >= VA_EXCLUDE_START) { + addr = VA_EXCLUDE_END; + vma = find_vma(mm, VA_EXCLUDE_END); } if (task_size < addr) { if (start_addr != TASK_UNMAPPED_BASE) { @@ -174,12 +216,12 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u asmlinkage unsigned long sparc_brk(unsigned long brk) { /* People could try to be nasty and use ta 0x6d in 32bit programs */ - if (test_thread_flag(TIF_32BIT) && - brk >= 0xf0000000UL) + if (test_thread_flag(TIF_32BIT) && brk >= 0xf0000000UL) return current->mm->brk; - if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET)) + if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) return current->mm->brk; + return sys_brk(brk); } @@ -340,13 +382,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, retval = -EINVAL; if (test_thread_flag(TIF_32BIT)) { - if (len > 0xf0000000UL || - ((flags & MAP_FIXED) && addr > 0xf0000000UL - len)) + if (len >= 0xf0000000UL) + goto out_putf; + + if ((flags & MAP_FIXED) && addr > 0xf0000000UL - len) goto out_putf; } else { - if (len > -PAGE_OFFSET || - ((flags & MAP_FIXED) && - addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) + if (len >= VA_EXCLUDE_START) + goto out_putf; + + if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len)) goto out_putf; } @@ -365,9 +410,9 @@ asmlinkage long sys64_munmap(unsigned long addr, size_t len) { long ret; - if (len > -PAGE_OFFSET || - (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET)) + if (invalid_64bit_range(addr, len)) return -EINVAL; + down_write(¤t->mm->mmap_sem); ret = do_munmap(current->mm, addr, len); up_write(¤t->mm->mmap_sem); @@ -384,18 +429,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr, { struct vm_area_struct *vma; unsigned long ret = -EINVAL; + if (test_thread_flag(TIF_32BIT)) goto out; - if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET) + if (unlikely(new_len >= VA_EXCLUDE_START)) goto out; - if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET) + if (unlikely(invalid_64bit_range(addr, old_len))) goto out; + down_write(¤t->mm->mmap_sem); if (flags & MREMAP_FIXED) { - if (new_addr < 
PAGE_OFFSET && - new_addr + new_len > -PAGE_OFFSET) + if (invalid_64bit_range(new_addr, new_len)) goto out_sem; - } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) { + } else if (invalid_64bit_range(addr, new_len)) { unsigned long map_flags = 0; struct file *file = NULL; -- cgit v1.2.3 From c4f8ef77f941e7d89a52fad1fbe04d59397e2bd1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Mar 2006 20:28:34 -0800 Subject: [SPARC64]: Fix mini RTC driver reading. Need to subtract 1900 from year and 1 from month before giving it back to userspace. Signed-off-by: David S. Miller --- arch/sparc64/kernel/time.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index d50ebeae144..7d61f1bfd3d 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c @@ -1363,6 +1363,8 @@ static inline void mini_get_rtc_time(struct rtc_time *time) spin_unlock_irqrestore(&rtc_lock, flags); to_tm(seconds, time); + time->tm_year -= 1900; + time->tm_mon -= 1; } static inline int mini_set_rtc_time(struct rtc_time *time) -- cgit v1.2.3 From bcc28ee0bf390df0d81cc9dafe980faef6b2771a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Mar 2006 20:42:53 -0800 Subject: [SPARC64]: Fix sun4v mna winfixup handling. We were clobbering a base register before we were done using it. Fix a comment typo while we're here. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sun4v_tlb_miss.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S index 3eed8db9684..ab23ddb7116 100644 --- a/arch/sparc64/kernel/sun4v_tlb_miss.S +++ b/arch/sparc64/kernel/sun4v_tlb_miss.S @@ -18,7 +18,7 @@ /* DEST = (VADDR >> 22) * - * Branch to ZERO_CTX_LABEL is context is zero. + * Branch to ZERO_CTX_LABEL if context is zero. */ #define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \ srlx VADDR, 22, DEST; \ @@ -314,10 +314,10 @@ sun4v_mna: nop SET_GL(1) - ldxa [%g0] ASI_SCRATCHPAD, %g5 - ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5 + ldxa [%g0] ASI_SCRATCHPAD, %g2 + ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5 mov HV_FAULT_TYPE_UNALIGNED, %g3 - ldx [%g5 + HV_FAULT_D_CTX_OFFSET], %g4 + ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4 sllx %g3, 16, %g3 or %g4, %g3, %g4 ba,pt %xcc, winfix_mna -- cgit v1.2.3 From 3cab0c3e8636d5005041aa52224f796c3a4ef872 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Mar 2006 21:50:47 -0800 Subject: [SPARC64]: More SUN4V cpu mondo bug fixing. This cpu mondo sending interface isn't all that easy to use correctly... We were clearing out the wrong bits from the "mask" after getting something other than EOK from the hypervisor. It turns out the hypervisor can just be resent the same cpu_list[] array, with the 0xffff "done" entries still in there, and it will do the right thing. So don't update or try to rebuild the cpu_list[] array to condense it. This requires the "forward_progress" check to be done slightly differently, but this new scheme is less bug prone than what we were doing before. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/smp.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 6bc7fd47e44..c4548a88953 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -563,7 +563,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t u64 *mondo; cpumask_t error_mask; unsigned long flags, status; - int cnt, retries, this_cpu, i; + int cnt, retries, this_cpu, prev_sent, i; /* We have to do this whole thing with interrupts fully disabled. * Otherwise if we send an xcall from interrupt context it will @@ -595,8 +595,9 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t cpus_clear(error_mask); retries = 0; + prev_sent = 0; do { - int forward_progress; + int forward_progress, n_sent; status = sun4v_cpu_mondo_send(cnt, tb->cpu_list_pa, @@ -606,18 +607,23 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t if (likely(status == HV_EOK)) break; - /* First, clear out all the cpus in the mask that were - * successfully sent to. The hypervisor indicates this - * by setting the cpu list entry of such cpus to 0xffff. + /* First, see if we made any forward progress. + * + * The hypervisor indicates successful sends by setting + * cpu list entries to the value 0xffff. */ - forward_progress = 0; + n_sent = 0; for (i = 0; i < cnt; i++) { - if (cpu_list[i] == 0xffff) { - cpu_clear(i, mask); - forward_progress = 1; - } + if (likely(cpu_list[i] == 0xffff)) + n_sent++; } + forward_progress = 0; + if (n_sent > prev_sent) + forward_progress = 1; + + prev_sent = n_sent; + /* If we get a HV_ECPUERROR, then one or more of the cpus * in the list are in error state. Use the cpu_state() * hypervisor call to find out which cpus are in error state. @@ -634,18 +640,20 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t err = sun4v_cpu_state(cpu); if (err >= 0 && err == HV_CPU_STATE_ERROR) { - cpu_clear(cpu, mask); + cpu_list[i] = 0xffff; cpu_set(cpu, error_mask); } } } else if (unlikely(status != HV_EWOULDBLOCK)) goto fatal_mondo_error; - /* Rebuild the cpu_list[] array and try again. */ - cnt = 0; - for_each_cpu_mask(i, mask) - cpu_list[cnt++] = i; - + /* Don't bother rewriting the CPU list, just leave the + * 0xffff and non-0xffff entries in there and the + * hypervisor will do the right thing. + * + * Only advance timeout state if we didn't make any + * forward progress. + */ if (unlikely(!forward_progress)) { if (unlikely(++retries > 10000)) goto fatal_mondo_timeout; -- cgit v1.2.3 From 74ae998772041b62e9ad420d602e4f7dbb182cd6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 18:26:24 -0800 Subject: [SPARC64]: Simplify TSB insert checks. Don't try to avoid putting non-base page sized entries into the user TSB. It actually costs us more to check this than it helps. Eventually we'll have a multiple TSB scheme for user processes. Once a process starts using larger pages, we'll allocate and use such a TSB. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/tsb.S | 14 -------------- arch/sparc64/mm/init.c | 15 ++++++--------- 2 files changed, 6 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index 563852bf359..d738910153f 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -55,20 +55,6 @@ tsb_reload: brgez,a,pn %g5, tsb_do_fault TSB_STORE(%g1, %g7) - /* If it is larger than the base page size, don't - * bother putting it into the TSB. - */ - sethi %hi(_PAGE_ALL_SZ_BITS), %g7 - ldx [%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7 - and %g5, %g7, %g2 - sethi %hi(_PAGE_SZBITS), %g7 - ldx [%g7 + %lo(_PAGE_SZBITS)], %g7 - cmp %g2, %g7 - mov 1, %g7 - sllx %g7, TSB_TAG_INVALID_BIT, %g7 - bne,a,pn %xcc, tsb_tlb_reload - TSB_STORE(%g1, %g7) - TSB_WRITE(%g1, %g5, %g6) /* Finally, load TLB and return from trap. */ diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 87d5d1af1ad..5930e87dafb 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; + struct tsb *tsb; + unsigned long tag; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,15 +310,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { - struct tsb *tsb; - unsigned long tag; - - tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & - (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL); - tsb_insert(tsb, tag, pte_val(pte)); - } + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & + (mm->context.tsb_nentries - 1UL)]; + tag = (address >> 22UL); + tsb_insert(tsb, tag, pte_val(pte)); } void flush_dcache_page(struct page *page) -- cgit v1.2.3 From f7c00338cfeef125032aa12aa8ebeacf9e117e81 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 22:18:50 -0800 Subject: [SPARC64]: Fix loop termination in mark_kpte_bitmap() If we were aligned, but didn't have at least 256MB left to process, we would loop forever. Thanks to fabbione for the report and testing the fix. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5930e87dafb..9bbd0bf64af 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -973,12 +973,15 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) while (start < end) { long remains; + remains = end - start; + if (remains < size_256MB) + break; + if (start & mask_256MB) { start = (start + size_256MB) & ~mask_256MB; continue; } - remains = end - start; while (remains >= size_256MB) { unsigned long index = start >> shift_256MB; -- cgit v1.2.3 From 9132983ae140a8ca81e95e081d5a4c0dd7a7f670 Mon Sep 17 00:00:00 2001 From: Eric Sesterhenn Date: Mon, 6 Mar 2006 13:48:40 -0800 Subject: [SPARC64]: kzalloc() conversion this patch converts arch/sparc64 to kzalloc usage. Crosscompile tested with allyesconfig. Signed-off-by: Eric Sesterhenn Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/ebus.c | 3 +-- arch/sparc64/kernel/irq.c | 6 ++---- arch/sparc64/kernel/pci_common.c | 9 +++------ arch/sparc64/kernel/pci_iommu.c | 3 +-- arch/sparc64/kernel/pci_psycho.c | 9 +++------ arch/sparc64/kernel/pci_sabre.c | 9 +++------ arch/sparc64/kernel/pci_schizo.c | 12 ++++-------- arch/sparc64/kernel/setup.c | 14 +++++--------- arch/sparc64/kernel/sys_sparc.c | 4 +--- arch/sparc64/kernel/us2e_cpufreq.c | 8 ++------ arch/sparc64/kernel/us3_cpufreq.c | 8 ++------ 11 files changed, 27 insertions(+), 58 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c index 7991e919d8a..c69504aa638 100644 --- a/arch/sparc64/kernel/ebus.c +++ b/arch/sparc64/kernel/ebus.c @@ -277,10 +277,9 @@ static inline void *ebus_alloc(size_t size) { void *mem; - mem = kmalloc(size, GFP_ATOMIC); + mem = kzalloc(size, GFP_ATOMIC); if (!mem) panic("ebus_alloc: out of memory"); - memset((char *)mem, 0, size); return mem; } diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 712b16cdd5f..8c93ba655b3 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -316,12 +316,11 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long goto out; } - bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); + bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); if (!bucket->irq_info) { prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); prom_halt(); } - memset(bucket->irq_info, 0, sizeof(struct irq_desc)); /* Ok, looks good, set it up. Don't touch the irq_chain or * the pending flag. @@ -357,12 +356,11 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsign bucket->pil = pil; bucket->flags = flags; - bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC); + bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC); if (!bucket->irq_info) { prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n"); prom_halt(); } - memset(bucket->irq_info, 0, sizeof(struct irq_desc)); return __irq(bucket); } diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c index 1448294a237..33dedb1aacd 100644 --- a/arch/sparc64/kernel/pci_common.c +++ b/arch/sparc64/kernel/pci_common.c @@ -977,33 +977,30 @@ void pci_register_legacy_regions(struct resource *io_res, struct resource *p; /* VGA Video RAM. */ - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; - memset(p, 0, sizeof(*p)); p->name = "Video RAM area"; p->start = mem_res->start + 0xa0000UL; p->end = p->start + 0x1ffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; - memset(p, 0, sizeof(*p)); p->name = "System ROM"; p->start = mem_res->start + 0xf0000UL; p->end = p->start + 0xffffUL; p->flags = IORESOURCE_BUSY; request_resource(mem_res, p); - p = kmalloc(sizeof(*p), GFP_KERNEL); + p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return; - memset(p, 0, sizeof(*p)); p->name = "Video ROM"; p->start = mem_res->start + 0xc0000UL; p->end = p->start + 0x7fffUL; diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index c9320eac45d..8efbc139769 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -139,12 +139,11 @@ void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, /* Allocate and initialize the free area map. 
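The whole conversion leans on kzalloc() being exactly the kmalloc()-plus-memset() pair it replaces, for any gfp mask; both GFP_KERNEL and GFP_ATOMIC call sites are touched here. A sketch of the equivalence, not the allocator's actual source:

    static inline void *kzalloc_sketch(size_t size, gfp_t flags)
    {
            void *p = kmalloc(size, flags);

            if (p)
                    memset(p, 0, size);
            return p;
    }

Folding the zeroing into the allocation call removes the open-coded memset() after every allocation in these files.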
*/ sz = num_tsb_entries / 8; sz = (sz + 7UL) & ~7UL; - iommu->arena.map = kmalloc(sz, GFP_KERNEL); + iommu->arena.map = kzalloc(sz, GFP_KERNEL); if (!iommu->arena.map) { prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); prom_halt(); } - memset(iommu->arena.map, 0, sz); iommu->arena.limit = num_tsb_entries; /* Allocate and initialize the dummy page which we diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index c03ed5f49d3..f7b16e49c28 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1164,7 +1164,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) static void pbm_scan_bus(struct pci_controller_info *p, struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); + struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (!cookie) { prom_printf("PSYCHO: Critical allocation failure.\n"); @@ -1172,7 +1172,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, } /* All we care about is the PBM. */ - memset(cookie, 0, sizeof(*cookie)); cookie->pbm = pbm; pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, @@ -1465,18 +1464,16 @@ void psycho_init(int node, char *model_name) } } - p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); + p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); if (!p) { prom_printf("PSYCHO: Fatal memory allocation error.\n"); prom_halt(); } - memset(p, 0, sizeof(*p)); - iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); if (!iommu) { prom_printf("PSYCHO: Fatal memory allocation error.\n"); prom_halt(); } - memset(iommu, 0, sizeof(*iommu)); p->pbm_A.iommu = p->pbm_B.iommu = iommu; p->next = pci_controller_root; diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index da8e1364194..5ddc9293197 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -1167,7 +1167,7 @@ static void apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus) static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); + struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (!cookie) { prom_printf("SABRE: Critical allocation failure.\n"); @@ -1175,7 +1175,6 @@ static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm) } /* All we care about is the PBM. 
*/ - memset(cookie, 0, sizeof(*cookie)); cookie->pbm = pbm; return cookie; @@ -1556,19 +1555,17 @@ void sabre_init(int pnode, char *model_name) } } - p = kmalloc(sizeof(*p), GFP_ATOMIC); + p = kzalloc(sizeof(*p), GFP_ATOMIC); if (!p) { prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n"); prom_halt(); } - memset(p, 0, sizeof(*p)); - iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); if (!iommu) { prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n"); prom_halt(); } - memset(iommu, 0, sizeof(*iommu)); p->pbm_A.iommu = p->pbm_B.iommu = iommu; upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff); diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index d8c4e0919b4..3a89f29e27d 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -1525,7 +1525,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) static void pbm_scan_bus(struct pci_controller_info *p, struct pci_pbm_info *pbm) { - struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL); + struct pcidev_cookie *cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); if (!cookie) { prom_printf("%s: Critical allocation failure.\n", pbm->name); @@ -1533,7 +1533,6 @@ static void pbm_scan_bus(struct pci_controller_info *p, } /* All we care about is the PBM. */ - memset(cookie, 0, sizeof(*cookie)); cookie->pbm = pbm; pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno, @@ -2120,27 +2119,24 @@ static void __schizo_init(int node, char *model_name, int chip_type) } } - p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); + p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC); if (!p) { prom_printf("SCHIZO: Fatal memory allocation error.\n"); prom_halt(); } - memset(p, 0, sizeof(*p)); - iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); if (!iommu) { prom_printf("SCHIZO: Fatal memory allocation error.\n"); prom_halt(); } - memset(iommu, 0, sizeof(*iommu)); p->pbm_A.iommu = iommu; - iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC); + iommu = kzalloc(sizeof(struct pci_iommu), GFP_ATOMIC); if (!iommu) { prom_printf("SCHIZO: Fatal memory allocation error.\n"); prom_halt(); } - memset(iommu, 0, sizeof(*iommu)); p->pbm_B.iommu = iommu; p->next = pci_controller_root; diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 0c68a76143a..2a2a8a6cd17 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -536,15 +536,11 @@ static int __init topology_init(void) while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) ncpus_probed++; - for (i = 0; i < NR_CPUS; i++) { - if (cpu_possible(i)) { - struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL); - - if (p) { - memset(p, 0, sizeof(*p)); - register_cpu(p, i, NULL); - err = 0; - } + for_each_cpu(i) { + struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); + if (p) { + register_cpu(p, i, NULL); + err = 0; } } diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 095db723bb8..8840415408b 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -600,12 +600,10 @@ asmlinkage long sys_utrap_install(utrap_entry_t type, } if (!current_thread_info()->utraps) { current_thread_info()->utraps = - kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); + kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL); if (!current_thread_info()->utraps) return -ENOMEM; current_thread_info()->utraps[0] = 1; - 
memset(current_thread_info()->utraps+1, 0, - UT_TRAP_INSTRUCTION_31*sizeof(long)); } else { if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p && current_thread_info()->utraps[0] > 1) { diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index 669fb83dd4f..1f83fe6a82d 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ b/arch/sparc64/kernel/us2e_cpufreq.c @@ -357,20 +357,16 @@ static int __init us2e_freq_init(void) struct cpufreq_driver *driver; ret = -ENOMEM; - driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); + driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); if (!driver) goto err_out; - memset(driver, 0, sizeof(*driver)); - us2e_freq_table = kmalloc( + us2e_freq_table = kzalloc( (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), GFP_KERNEL); if (!us2e_freq_table) goto err_out; - memset(us2e_freq_table, 0, - (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); - driver->init = us2e_freq_cpu_init; driver->verify = us2e_freq_verify; driver->target = us2e_freq_target; diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index a912c45bdc0..47e3acafb5b 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c @@ -218,20 +218,16 @@ static int __init us3_freq_init(void) struct cpufreq_driver *driver; ret = -ENOMEM; - driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); + driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); if (!driver) goto err_out; - memset(driver, 0, sizeof(*driver)); - us3_freq_table = kmalloc( + us3_freq_table = kzalloc( (NR_CPUS * sizeof(struct us3_freq_percpu_info)), GFP_KERNEL); if (!us3_freq_table) goto err_out; - memset(us3_freq_table, 0, - (NR_CPUS * sizeof(struct us3_freq_percpu_info))); - driver->init = us3_freq_cpu_init; driver->verify = us3_freq_verify; driver->target = us3_freq_target; -- cgit v1.2.3 From a77754b4d0731321db266c6c60ffcd7c62757da5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 19:59:50 -0800 Subject: [SPARC64]: Bulletproof MMU context locking. 1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock. 2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock. Signed-off-by: David S. 
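A minimal sketch of the rule point 2 enforces, using the lock named above (the comment is explanatory, not from the patch): a spinlock that may also be taken with interrupts disabled elsewhere must be acquired with local IRQs off, or an interrupt arriving on the same cpu while the lock is held can spin on it forever:

	unsigned long flags;

	/* ctx_alloc_lock and mm->context.lock can be reached from paths
	 * that run with interrupts disabled (e.g. cross-call handlers),
	 * so a plain spin_lock() here risks a self-deadlock.
	 */
	spin_lock_irqsave(&ctx_alloc_lock, flags);
	/* ... hand out a new context number ... */
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);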
Miller --- arch/sparc64/mm/init.c | 5 +++-- arch/sparc64/mm/tsb.c | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9bbd0bf64af..a63939347b3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; + unsigned long flags; int new_version; - spin_lock(&ctx_alloc_lock); + spin_lock_irqsave(&ctx_alloc_lock, flags); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); @@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm) out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; - spin_unlock(&ctx_alloc_lock); + spin_unlock_irqrestore(&ctx_alloc_lock, flags); if (unlikely(new_version)) smp_new_mmu_context_version(); diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 534ac281989..f36799b7152 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -354,6 +354,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + spin_lock_init(&mm->context.lock); mm->context.sparc64_ctx_val = 0UL; -- cgit v1.2.3 From ee29074d3bd23848905f52c515974e0cd0219faa Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 22:50:44 -0800 Subject: [SPARC64]: Fix new context version SMP handling. Don't piggyback the SMP receive signal code to do the context version change handling. Instead allocate another fixed PIL number for this asynchronous cross-call. We can't use smp_call_function() because this thing is invoked with interrupts disabled and a few spinlocks held. Also, fix smp_call_function_mask() to count "cpus" correctly. There is no guarantee that the local cpu is in the mask, yet that is exactly what this code was assuming. Signed-off-by: David S.
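The counting fix is easier to see out of diff context; a condensed sketch of the smp.c change that follows (comments are explanatory):

	int cpus;

	/* Old: assumed the calling cpu was always in the mask and
	 * subtracted one slot up front. If the caller was not in the
	 * mask, this undercounts and the response wait exits early.
	 */
	cpus = cpus_weight(mask) - 1;

	/* New: remove ourselves explicitly, then count who is left. */
	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);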
Miller --- arch/sparc64/kernel/devices.c | 2 +- arch/sparc64/kernel/pci_psycho.c | 14 +++++++------- arch/sparc64/kernel/pci_sabre.c | 14 +++++++------- arch/sparc64/kernel/pci_schizo.c | 12 ++++++------ arch/sparc64/kernel/pci_sun4v.c | 6 +++--- arch/sparc64/kernel/sbus.c | 10 +++++----- arch/sparc64/kernel/smp.c | 39 ++++++++++++++++++++++++--------------- arch/sparc64/kernel/ttable.S | 3 ++- arch/sparc64/mm/ultra.S | 5 +++++ 9 files changed, 60 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index 1341b99ca7a..007e8922cd1 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -157,7 +157,7 @@ unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node) return 0; } - return sun4v_build_irq(sun4v_vdev_devhandle, irq, 4, 0); + return sun4v_build_irq(sun4v_vdev_devhandle, irq, 5, 0); } static const char *cpu_mid_prop(void) diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index f7b16e49c28..d17878b145c 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -286,17 +286,17 @@ static unsigned char psycho_pil_table[] = { /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ -/*0x20*/4, /* SCSI */ +/*0x20*/5, /* SCSI */ /*0x21*/5, /* Ethernet */ /*0x22*/8, /* Parallel Port */ /*0x23*/13, /* Audio Record */ /*0x24*/14, /* Audio Playback */ /*0x25*/15, /* PowerFail */ -/*0x26*/4, /* second SCSI */ +/*0x26*/5, /* second SCSI */ /*0x27*/11, /* Floppy */ -/*0x28*/4, /* Spare Hardware */ +/*0x28*/5, /* Spare Hardware */ /*0x29*/9, /* Keyboard */ -/*0x2a*/4, /* Mouse */ +/*0x2a*/5, /* Mouse */ /*0x2b*/12, /* Serial */ /*0x2c*/10, /* Timer 0 */ /*0x2d*/11, /* Timer 1 */ @@ -313,11 +313,11 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) ret = psycho_pil_table[ino]; if (ret == 0 && pdev == NULL) { - ret = 4; + ret = 5; } else if (ret == 0) { switch ((pdev->class >> 16) & 0xff) { case PCI_BASE_CLASS_STORAGE: - ret = 4; + ret = 5; break; case PCI_BASE_CLASS_NETWORK: @@ -336,7 +336,7 @@ static int psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino) break; default: - ret = 4; + ret = 5; break; }; } diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 5ddc9293197..f67bb7f078c 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -533,17 +533,17 @@ static unsigned char sabre_pil_table[] = { /*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */ /*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */ /*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */ -/*0x20*/4, /* SCSI */ +/*0x20*/5, /* SCSI */ /*0x21*/5, /* Ethernet */ /*0x22*/8, /* Parallel Port */ /*0x23*/13, /* Audio Record */ /*0x24*/14, /* Audio Playback */ /*0x25*/15, /* PowerFail */ -/*0x26*/4, /* second SCSI */ +/*0x26*/5, /* second SCSI */ /*0x27*/11, /* Floppy */ -/*0x28*/4, /* Spare Hardware */ +/*0x28*/5, /* Spare Hardware */ /*0x29*/9, /* Keyboard */ -/*0x2a*/4, /* Mouse */ +/*0x2a*/5, /* Mouse */ /*0x2b*/12, /* Serial */ /*0x2c*/10, /* Timer 0 */ /*0x2d*/11, /* Timer 1 */ @@ -565,11 +565,11 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) ret = sabre_pil_table[ino]; if (ret == 0 && pdev == NULL) { - ret = 4; + ret = 5; } else if (ret == 0) { switch ((pdev->class >> 16) & 0xff) { case PCI_BASE_CLASS_STORAGE: - ret = 4; + ret = 5; break; case 
PCI_BASE_CLASS_NETWORK: @@ -588,7 +588,7 @@ static int sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino) break; default: - ret = 4; + ret = 5; break; }; } diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 3a89f29e27d..7fe4de03ac2 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -243,8 +243,8 @@ static unsigned char schizo_pil_table[] = { /*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */ /*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */ /*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */ -/*0x18*/4, /* SCSI */ -/*0x19*/4, /* second SCSI */ +/*0x18*/5, /* SCSI */ +/*0x19*/5, /* second SCSI */ /*0x1a*/0, /* UNKNOWN */ /*0x1b*/0, /* UNKNOWN */ /*0x1c*/8, /* Parallel */ @@ -254,7 +254,7 @@ static unsigned char schizo_pil_table[] = { /*0x20*/13, /* Audio Record */ /*0x21*/14, /* Audio Playback */ /*0x22*/12, /* Serial */ -/*0x23*/4, /* EBUS I2C */ +/*0x23*/5, /* EBUS I2C */ /*0x24*/10, /* RTC Clock */ /*0x25*/11, /* Floppy */ /*0x26*/0, /* UNKNOWN */ @@ -296,11 +296,11 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) ret = schizo_pil_table[ino]; if (ret == 0 && pdev == NULL) { - ret = 4; + ret = 5; } else if (ret == 0) { switch ((pdev->class >> 16) & 0xff) { case PCI_BASE_CLASS_STORAGE: - ret = 4; + ret = 5; break; case PCI_BASE_CLASS_NETWORK: @@ -319,7 +319,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) break; default: - ret = 4; + ret = 5; break; }; } diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index a9c44c0ae0a..9372d4f376d 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c @@ -735,11 +735,11 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, u32 devhandle = pbm->devhandle; int pil; - pil = 4; + pil = 5; if (pdev) { switch ((pdev->class >> 16) & 0xff) { case PCI_BASE_CLASS_STORAGE: - pil = 4; + pil = 5; break; case PCI_BASE_CLASS_NETWORK: @@ -758,7 +758,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm, break; default: - pil = 4; + pil = 5; break; }; } diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c index d95a1bcf163..1d6ffdeabd4 100644 --- a/arch/sparc64/kernel/sbus.c +++ b/arch/sparc64/kernel/sbus.c @@ -693,11 +693,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts) /* SBUS SYSIO INO number to Sparc PIL level. 
*/ static unsigned char sysio_ino_to_pil[] = { - 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */ - 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */ - 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */ - 0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */ - 4, /* Onboard SCSI */ + 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 0 */ + 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 1 */ + 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 2 */ + 0, 5, 5, 7, 5, 7, 8, 9, /* SBUS slot 3 */ + 5, /* Onboard SCSI */ 5, /* Onboard Ethernet */ /*XXX*/ 8, /* Onboard BPP */ 0, /* Bogon */ diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index c4548a88953..cf56128097c 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -760,12 +760,9 @@ static int smp_call_function_mask(void (*func)(void *info), void *info, int nonatomic, int wait, cpumask_t mask) { struct call_data_struct data; - int cpus = cpus_weight(mask) - 1; + int cpus; long timeout; - if (!cpus) - return 0; - /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); @@ -776,6 +773,11 @@ static int smp_call_function_mask(void (*func)(void *info), void *info, spin_lock(&call_lock); + cpu_clear(smp_processor_id(), mask); + cpus = cpus_weight(mask); + if (!cpus) + goto out_unlock; + call_data = &data; smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask); @@ -792,6 +794,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info, udelay(1); } +out_unlock: spin_unlock(&call_lock); return 0; @@ -845,6 +848,7 @@ extern unsigned long xcall_flush_tlb_pending; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_report_regs; extern unsigned long xcall_receive_signal; +extern unsigned long xcall_new_mmu_context_version; #ifdef DCACHE_ALIASING_POSSIBLE extern unsigned long xcall_flush_dcache_page_cheetah; @@ -973,8 +977,14 @@ void smp_receive_signal(int cpu) } void smp_receive_signal_client(int irq, struct pt_regs *regs) +{ + clear_softint(1 << irq); +} + +void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) { struct mm_struct *mm; + unsigned long flags; clear_softint(1 << irq); @@ -982,25 +992,24 @@ void smp_receive_signal_client(int irq, struct pt_regs *regs) * the version of the one we are using is now out of date. 
*/ mm = current->active_mm; - if (likely(mm)) { - unsigned long flags; + if (unlikely(!mm || (mm == &init_mm))) + return; - spin_lock_irqsave(&mm->context.lock, flags); + spin_lock_irqsave(&mm->context.lock, flags); - if (unlikely(!CTX_VALID(mm->context))) - get_new_mmu_context(mm); + if (unlikely(!CTX_VALID(mm->context))) + get_new_mmu_context(mm); - load_secondary_context(mm); - __flush_tlb_mm(CTX_HWBITS(mm->context), - SECONDARY_CONTEXT); + spin_unlock_irqrestore(&mm->context.lock, flags); - spin_unlock_irqrestore(&mm->context.lock, flags); - } + load_secondary_context(mm); + __flush_tlb_mm(CTX_HWBITS(mm->context), + SECONDARY_CONTEXT); } void smp_new_mmu_context_version(void) { - __smp_receive_signal_mask(cpu_online_map); + smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); } void smp_report_regs(void) diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S index d5a8dd52d1f..5d901519db5 100644 --- a/arch/sparc64/kernel/ttable.S +++ b/arch/sparc64/kernel/ttable.S @@ -51,12 +51,13 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40) tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) +tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) #else tl0_irq1: BTRAP(0x41) tl0_irq2: BTRAP(0x42) tl0_irq3: BTRAP(0x43) +tl0_irq4: BTRAP(0x44) #endif -tl0_irq4: TRAP_IRQ(handler_irq, 4) tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6) tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8) tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10) diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index bd8b0b4f878..f8479fad404 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -673,6 +673,11 @@ xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry + .globl xcall_new_mmu_context_version +xcall_new_mmu_context_version: + wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint + retry + #endif /* CONFIG_SMP */ -- cgit v1.2.3 From d1112018b4bc82adf5c8a9c15a08954328f023ae Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 02:16:07 -0800 Subject: [SPARC64]: Move over to sparsemem. This has been pending for a long time, and the fact that we waste a ton of ram on some configurations kind of pushed things over the edge. Signed-off-by: David S. Miller --- arch/sparc64/Kconfig | 6 ++ arch/sparc64/kernel/sparc64_ksyms.c | 7 -- arch/sparc64/mm/init.c | 134 ++++++++++++++++++++++++++---------- 3 files changed, 104 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 4c0a50a7655..a253a39c3ff 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -186,6 +186,12 @@ endchoice endmenu +config ARCH_SPARSEMEM_ENABLE + def_bool y + +config ARCH_SPARSEMEM_DEFAULT + def_bool y + source "mm/Kconfig" config GENERIC_ISA_DMA diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index e87fe7dfc7d..9914a17651b 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c @@ -95,9 +95,6 @@ extern int __ashrdi3(int, int); extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); -extern unsigned long phys_base; -extern unsigned long pfn_base; - extern unsigned int sys_call_table[]; extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); @@ -346,11 +343,7 @@ EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__clear_user); /* Various address conversion macros use this. 
*/ -EXPORT_SYMBOL(phys_base); -EXPORT_SYMBOL(pfn_base); EXPORT_SYMBOL(sparc64_valid_addr_bitmap); -EXPORT_SYMBOL(page_to_pfn); -EXPORT_SYMBOL(pfn_to_page); /* No version information on this, heavily used in inline asm, * and will always be 'void __ret_efault(void)'. diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a63939347b3..5f67b53b3a5 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -130,11 +130,9 @@ static void __init read_obp_memory(const char *property, unsigned long *sparc64_valid_addr_bitmap __read_mostly; -/* Ugly, but necessary... -DaveM */ -unsigned long phys_base __read_mostly; +/* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; -unsigned long pfn_base __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -368,16 +366,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) } } -unsigned long page_to_pfn(struct page *page) -{ - return (unsigned long) ((page - mem_map) + pfn_base); -} - -struct page *pfn_to_page(unsigned long pfn) -{ - return (mem_map + (pfn - pfn_base)); -} - void show_mem(void) { printk("Mem-info:\n"); @@ -773,9 +761,78 @@ void sparc_ultra_dump_dtlb(void) extern unsigned long cmdline_memory_size; -unsigned long __init bootmem_init(unsigned long *pages_avail) +/* Find a free area for the bootmem map, avoiding the kernel image + * and the initial ramdisk. + */ +static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, + unsigned long end_pfn) { - unsigned long bootmap_size, start_pfn, end_pfn; + unsigned long avoid_start, avoid_end, bootmap_size; + int i; + + bootmap_size = ((end_pfn - start_pfn) + 7) / 8; + bootmap_size = ALIGN(bootmap_size, sizeof(long)); + + avoid_start = avoid_end = 0; +#ifdef CONFIG_BLK_DEV_INITRD + avoid_start = initrd_start; + avoid_end = PAGE_ALIGN(initrd_end); +#endif + +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", + kern_base, PAGE_ALIGN(kern_base + kern_size), + avoid_start, avoid_end); +#endif + for (i = 0; i < pavail_ents; i++) { + unsigned long start, end; + + start = pavail[i].phys_addr; + end = start + pavail[i].reg_size; + + while (start < end) { + if (start >= kern_base && + start < PAGE_ALIGN(kern_base + kern_size)) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + if (start >= avoid_start && start < avoid_end) { + start = avoid_end; + continue; + } + + if ((end - start) < bootmap_size) + break; + + if (start < kern_base && + (start + bootmap_size) > kern_base) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + + if (start < avoid_start && + (start + bootmap_size) > avoid_start) { + start = avoid_end; + continue; + } + + /* OK, it doesn't overlap anything, use it. */ +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", + start >> PAGE_SHIFT, start); +#endif + return start >> PAGE_SHIFT; + } + } + + prom_printf("Cannot find free area for bootmap, aborting.\n"); + prom_halt(); +} + +static unsigned long __init bootmem_init(unsigned long *pages_avail, + unsigned long phys_base) +{ + unsigned long bootmap_size, end_pfn; unsigned long end_of_phys_memory = 0UL; unsigned long bootmap_pfn, bytes_avail, size; int i; @@ -813,14 +870,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) *pages_avail = bytes_avail >> PAGE_SHIFT; - /* Start with page aligned address of last symbol in kernel - * image. 
The kernel is hard mapped below PAGE_OFFSET in a - * 4MB locked TLB translation. - */ - start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; - - bootmap_pfn = start_pfn; - end_pfn = end_of_phys_memory >> PAGE_SHIFT; #ifdef CONFIG_BLK_DEV_INITRD @@ -837,23 +886,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) "(0x%016lx > 0x%016lx)\ndisabling initrd\n", initrd_end, end_of_phys_memory); initrd_start = 0; - } - if (initrd_start) { - if (initrd_start >= (start_pfn << PAGE_SHIFT) && - initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) - bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; + initrd_end = 0; } } #endif /* Initialize the boot-time allocator. */ max_pfn = max_low_pfn = end_pfn; - min_low_pfn = pfn_base; + min_low_pfn = (phys_base >> PAGE_SHIFT); + + bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn); #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", min_low_pfn, bootmap_pfn, max_low_pfn); #endif - bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); + bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, + (phys_base >> PAGE_SHIFT), + end_pfn); /* Now register the available physical memory with the * allocator. @@ -901,6 +950,20 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; + for (i = 0; i < pavail_ents; i++) { + unsigned long start_pfn, end_pfn; + + start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; + end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("memory_present(0, %lx, %lx)\n", + start_pfn, end_pfn); +#endif + memory_present(0, start_pfn, end_pfn); + } + + sparse_init(); + return end_pfn; } @@ -1180,7 +1243,7 @@ static void sun4v_pgprot_init(void); void __init paging_init(void) { - unsigned long end_pfn, pages_avail, shift; + unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long real_end, i; kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; @@ -1211,8 +1274,6 @@ void __init paging_init(void) for (i = 0; i < pavail_ents; i++) phys_base = min(phys_base, pavail[i].phys_addr); - pfn_base = phys_base >> PAGE_SHIFT; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); @@ -1248,7 +1309,9 @@ void __init paging_init(void) /* Setup bootmem... */ pages_avail = 0; - last_valid_pfn = end_pfn = bootmem_init(&pages_avail); + last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); + + max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); kernel_physical_mapping_init(); @@ -1261,7 +1324,7 @@ void __init paging_init(void) for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - pfn_base; + npages = end_pfn - (phys_base >> PAGE_SHIFT); zones_size[ZONE_DMA] = npages; zholes_size[ZONE_DMA] = npages - pages_avail; @@ -1336,7 +1399,6 @@ void __init mem_init(void) taint_real_pages(); - max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(last_valid_pfn << PAGE_SHIFT); #ifdef CONFIG_DEBUG_BOOTMEM -- cgit v1.2.3 From 17b0e199a10184d8c5bbbd79a4cee993bb1fb257 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 15:57:03 -0800 Subject: [SPARC64]: Fix 32-bit truncation which broke sparsemem. 
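A distilled illustration of the truncation named in the subject line (the values here are invented; the real constants appear in the init.c hunk below). With unsuffixed constants the arithmetic is done in 32-bit int and widens to 64 bits with a zero upper word, so, assuming gcc's usual two's-complement behavior:

	unsigned long flags = 0xabcdef0012345678UL;

	/* int math: ~(255 << 24) widens to 0x0000000000ffffff, so this
	 * wipes the upper 32 bits of flags as a side effect.
	 */
	flags &= ~(255 << 24);

	/* 64-bit math: ~(255UL << 24) == 0xffffffff00ffffff, and the
	 * upper half survives.
	 */
	flags &= ~(255UL << 24);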
The page->flags manipulations done by the D-cache dirty state tracking were broken because the constants were not marked with "UL" to make them 64-bit, which means we were clobbering the upper 32-bits of page->flags all the time. This doesn't jibe well with sparsemem, which stores the section and indexing information in the top 32-bits of page->flags. This is yet another sparc64 bug which has been with us forever. While we're here, tidy up some things in bootmem_init() and paging_init(): 1) Pass min_low_pfn to init_bootmem_node(), it's identical to (phys_base >> PAGE_SHIFT), but we should be consistent with the variable names we print under CONFIG_DEBUG_BOOTMEM. 2) max_mapnr, although no longer used, was being set inaccurately; we shouldn't subtract pfn_base any more. 3) All the games with phys_base in the zones_*[] arrays we pass to free_area_init_node() are no longer necessary. Thanks to Josh Grebe and Fabbione for the bug reports and testing. Fix also verified locally on an SB2500 which had a memory layout that triggered the same problem. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5f67b53b3a5..b40f6477dea 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -205,8 +205,8 @@ inline void flush_dcache_page_impl(struct page *page) } #define PG_dcache_dirty PG_arch_1 -#define PG_dcache_cpu_shift 24 -#define PG_dcache_cpu_mask (256 - 1) +#define PG_dcache_cpu_shift 24UL +#define PG_dcache_cpu_mask (256UL - 1UL) #if NR_CPUS > 256 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus @@ -901,8 +901,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, min_low_pfn, bootmap_pfn, max_low_pfn); #endif bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, - (phys_base >> PAGE_SHIFT), - end_pfn); + min_low_pfn, end_pfn); /* Now register the available physical memory with the * allocator. @@ -1311,25 +1310,24 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); - max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); + max_mapnr = last_valid_pfn; kernel_physical_mapping_init(); { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; - unsigned long npages; int znum; for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - (phys_base >> PAGE_SHIFT); - zones_size[ZONE_DMA] = npages; - zholes_size[ZONE_DMA] = npages - pages_avail; + zones_size[ZONE_DMA] = end_pfn; + zholes_size[ZONE_DMA] = end_pfn - pages_avail; free_area_init_node(0, &contig_page_data, zones_size, - phys_base >> PAGE_SHIFT, zholes_size); + __pa(PAGE_OFFSET) >> PAGE_SHIFT, + zholes_size); } device_scan(); -- cgit v1.2.3 From 8935dced547afbf37d0fcfcac9a3556494e53104 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 16:09:19 -0800 Subject: [SPARC64]: Add SMT scheduling support for Niagara. The mapping is a simple "(cpuid >> 2) == core" for now. Later we'll add more sophisticated code that will walk the sun4v machine description and figure this out from there. We should also add core mappings for jaguar and panther processors. Signed-off-by: David S.
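A sketch of the stop-gap sibling rule quoted above: Niagara runs four hardware threads per core, numbered consecutively, so two cpu ids share a core exactly when they agree above the low two bits (the helper name is invented for illustration):

	static inline int niagara_same_core(int cpua, int cpub)
	{
		/* cpu ids 0-3 -> core 0, ids 4-7 -> core 1, ... */
		return (cpua >> 2) == (cpub >> 2);
	}

The smp.c hunk below applies exactly this test when filling in cpu_sibling_map[].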
Miller --- arch/sparc64/Kconfig | 9 +++++++++ arch/sparc64/kernel/smp.c | 18 ++++++++++++++++++ 2 files changed, 27 insertions(+) (limited to 'arch') diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index a253a39c3ff..49b652f9b1d 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -356,6 +356,15 @@ config SOLARIS_EMUL endmenu +config SCHED_SMT + bool "SMT (Hyperthreading) scheduler support" + depends on SMP + default y + help + SMT scheduler support improves the CPU scheduler's decision making + when dealing with UltraSPARC cpus at a cost of slightly increased + overhead in some places. If unsure say N here. + config CMDLINE_BOOL bool "Default bootloader kernel arguments" diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index cf56128097c..373a701c90a 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -47,6 +47,8 @@ static unsigned char boot_cpu_id; cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; +cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = + { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; static cpumask_t smp_commenced_mask; static cpumask_t cpu_callout_map; @@ -1291,6 +1293,8 @@ int setup_profiling_timer(unsigned int multiplier) /* Constrain the number of cpus to max_cpus. */ void __init smp_prepare_cpus(unsigned int max_cpus) { + int i; + if (num_possible_cpus() > max_cpus) { int instance, mid; @@ -1305,6 +1309,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } + for_each_cpu(i) { + if (tlb_type == hypervisor) { + int j; + + /* XXX get this mapping from machine description */ + for_each_cpu(j) { + if ((j >> 2) == (i >> 2)) + cpu_set(j, cpu_sibling_map[i]); + } + } else { + cpu_set(i, cpu_sibling_map[i]); + } + } + smp_store_cpu_info(boot_cpu_id); } -- cgit v1.2.3 From 90a6646bf6a1ca821f32d5510e935855612904df Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 17:18:19 -0800 Subject: [SPARC64]: Fix system type in /proc/cpuinfo and remove bogus OBP check. Report 'sun4v' when appropriate in /proc/cpuinfo Remove all the verifications of the OBP version string. Just make sure it's there, and report it raw in the bootup logs and via /proc/cpuinfo. Signed-off-by: David S. Miller --- arch/sparc64/kernel/setup.c | 13 ++++++------ arch/sparc64/prom/init.c | 48 ++++----------------------------------------- arch/sparc64/prom/misc.c | 18 ----------------- 3 files changed, 10 insertions(+), 69 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 2a2a8a6cd17..7ae4027a919 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -442,9 +442,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) seq_printf(m, "cpu\t\t: %s\n" "fpu\t\t: %s\n" - "promlib\t\t: Version 3 Revision %d\n" - "prom\t\t: %d.%d.%d\n" - "type\t\t: sun4u\n" + "prom\t\t: %s\n" + "type\t\t: %s\n" "ncpus probed\t: %d\n" "ncpus active\t: %d\n" "D$ parity tl1\t: %u\n" @@ -456,10 +455,10 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) , sparc_cpu_type, sparc_fpu_type, - prom_rev, - prom_prev >> 16, - (prom_prev >> 8) & 0xff, - prom_prev & 0xff, + prom_version, + ((tlb_type == hypervisor) ? 
+ "sun4v" : + "sun4u"), ncpus_probed, num_online_cpus(), dcache_parity_tl1_occurred, diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index 095755e428a..1c0db842a6f 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c @@ -14,8 +14,8 @@ #include #include -enum prom_major_version prom_vers; -unsigned int prom_rev, prom_prev; +/* OBP version string. */ +char prom_version[80]; /* The root node of the prom device tree. */ int prom_stdin, prom_stdout; @@ -30,13 +30,7 @@ extern void prom_cif_init(void *, void *); void __init prom_init(void *cif_handler, void *cif_stack) { - char buffer[80], *p; - int ints[3]; int node; - int i = 0; - int bufadjust; - - prom_vers = PROM_P1275; prom_cif_init(cif_handler, cif_stack); @@ -51,44 +45,10 @@ void __init prom_init(void *cif_handler, void *cif_stack) if (!node || node == -1) prom_halt(); - prom_getstring(node, "version", buffer, sizeof (buffer)); + prom_getstring(node, "version", prom_version, sizeof(prom_version)); prom_printf("\n"); - if (strncmp(buffer, "OBP ", 4)) - goto strange_version; - - /* - * Version field is expected to be 'OBP xx.yy.zz date...' - * However, Sun can't stick to this format very well, so - * we need to check for 'OBP xx.yy.zz date...' and adjust - * accordingly. -spot - */ - - if (strncmp(buffer, "OBP ", 5)) - bufadjust = 4; - else - bufadjust = 5; - - p = buffer + bufadjust; - while (p && isdigit(*p) && i < 3) { - ints[i++] = simple_strtoul(p, NULL, 0); - if ((p = strchr(p, '.')) != NULL) - p++; - } - if (i != 3) - goto strange_version; - - prom_rev = ints[1]; - prom_prev = (ints[0] << 16) | (ints[1] << 8) | ints[2]; - - printk("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); + printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version); printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); - - /* Initialization successful. */ - return; - -strange_version: - prom_printf ("Strange OBP version `%s'.\n", buffer); - prom_halt (); } diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c index 90df42141b1..577bde8b664 100644 --- a/arch/sparc64/prom/misc.c +++ b/arch/sparc64/prom/misc.c @@ -112,24 +112,6 @@ unsigned char prom_get_idprom(char *idbuf, int num_bytes) return 0xff; } -/* Get the major prom version number. */ -int prom_version(void) -{ - return PROM_P1275; -} - -/* Get the prom plugin-revision. */ -int prom_getrev(void) -{ - return prom_rev; -} - -/* Get the prom firmware print revision. */ -int prom_getprev(void) -{ - return prom_prev; -} - /* Install Linux trap table so PROM uses that instead of its own. */ void prom_set_trap_table(unsigned long tba) { -- cgit v1.2.3 From 0c51ed93ca0ecbf44ec096f4bd04c12a3e761e6b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Mar 2006 01:27:34 -0800 Subject: [SPARC64]: First cut at VIS simulator for Niagara. Niagara does not implement some of the VIS instructions in hardware, so we have to emulate them. Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/Makefile | 3 +- arch/sparc64/kernel/traps.c | 5 + arch/sparc64/kernel/visemul.c | 894 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 901 insertions(+), 1 deletion(-) create mode 100644 arch/sparc64/kernel/visemul.c (limited to 'arch') diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index fedfd9c6729..6f6816488b0 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -11,7 +11,8 @@ obj-y := process.o setup.o cpu.o idprom.o \ traps.o devices.o auxio.o una_asm.o \ irq.o ptrace.o time.o sys_sparc.o signal.o \ unaligned.o central.o pci.o starfire.o semaphore.o \ - power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o + power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ + visemul.o obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ pci_psycho.o pci_sabre.o pci_schizo.o \ diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 043a72658f6..7f7dba0ca96 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -2273,6 +2273,11 @@ void do_illegal_instruction(struct pt_regs *regs) } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { if (handle_ldf_stq(insn, regs)) return; + } else if (tlb_type == hypervisor) { + extern int vis_emul(struct pt_regs *, unsigned int); + + if (!vis_emul(regs, insn)) + return; } } info.si_signo = SIGILL; diff --git a/arch/sparc64/kernel/visemul.c b/arch/sparc64/kernel/visemul.c new file mode 100644 index 00000000000..84fedaa38aa --- /dev/null +++ b/arch/sparc64/kernel/visemul.c @@ -0,0 +1,894 @@ +/* visemul.c: Emulation of VIS instructions. + * + * Copyright (C) 2006 David S. Miller (davem@davemloft.net) + */ +#include +#include +#include + +#include +#include +#include +#include +#include + +/* OPF field of various VIS instructions. 
*/ + +/* 000111011 - four 16-bit packs */ +#define FPACK16_OPF 0x03b + +/* 000111010 - two 32-bit packs */ +#define FPACK32_OPF 0x03a + +/* 000111101 - four 16-bit packs */ +#define FPACKFIX_OPF 0x03d + +/* 001001101 - four 16-bit expands */ +#define FEXPAND_OPF 0x04d + +/* 001001011 - two 32-bit merges */ +#define FPMERGE_OPF 0x04b + +/* 000110001 - 8-by-16-bit partitioned product */ +#define FMUL8x16_OPF 0x031 + +/* 000110011 - 8-by-16-bit upper alpha partitioned product */ +#define FMUL8x16AU_OPF 0x033 + +/* 000110101 - 8-by-16-bit lower alpha partitioned product */ +#define FMUL8x16AL_OPF 0x035 + +/* 000110110 - upper 8-by-16-bit partitioned product */ +#define FMUL8SUx16_OPF 0x036 + +/* 000110111 - lower 8-by-16-bit partitioned product */ +#define FMUL8ULx16_OPF 0x037 + +/* 000111000 - upper 8-by-16-bit partitioned product */ +#define FMULD8SUx16_OPF 0x038 + +/* 000111001 - lower unsigned 8-by-16-bit partitioned product */ +#define FMULD8ULx16_OPF 0x039 + +/* 000101000 - four 16-bit compare; set rd if src1 > src2 */ +#define FCMPGT16_OPF 0x028 + +/* 000101100 - two 32-bit compare; set rd if src1 > src2 */ +#define FCMPGT32_OPF 0x02c + +/* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ +#define FCMPLE16_OPF 0x020 + +/* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ +#define FCMPLE32_OPF 0x024 + +/* 000100010 - four 16-bit compare; set rd if src1 != src2 */ +#define FCMPNE16_OPF 0x022 + +/* 000100110 - two 32-bit compare; set rd if src1 != src2 */ +#define FCMPNE32_OPF 0x026 + +/* 000101010 - four 16-bit compare; set rd if src1 == src2 */ +#define FCMPEQ16_OPF 0x02a + +/* 000101110 - two 32-bit compare; set rd if src1 == src2 */ +#define FCMPEQ32_OPF 0x02e + +/* 000000000 - Eight 8-bit edge boundary processing */ +#define EDGE8_OPF 0x000 + +/* 000000001 - Eight 8-bit edge boundary processing, no CC */ +#define EDGE8N_OPF 0x001 + +/* 000000010 - Eight 8-bit edge boundary processing, little-endian */ +#define EDGE8L_OPF 0x002 + +/* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ +#define EDGE8LN_OPF 0x003 + +/* 000000100 - Four 16-bit edge boundary processing */ +#define EDGE16_OPF 0x004 + +/* 000000101 - Four 16-bit edge boundary processing, no CC */ +#define EDGE16N_OPF 0x005 + +/* 000000110 - Four 16-bit edge boundary processing, little-endian */ +#define EDGE16L_OPF 0x006 + +/* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ +#define EDGE16LN_OPF 0x007 + +/* 000001000 - Two 32-bit edge boundary processing */ +#define EDGE32_OPF 0x008 + +/* 000001001 - Two 32-bit edge boundary processing, no CC */ +#define EDGE32N_OPF 0x009 + +/* 000001010 - Two 32-bit edge boundary processing, little-endian */ +#define EDGE32L_OPF 0x00a + +/* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ +#define EDGE32LN_OPF 0x00b + +/* 000111110 - distance between 8 8-bit components */ +#define PDIST_OPF 0x03e + +/* 000010000 - convert 8-bit 3-D address to blocked byte address */ +#define ARRAY8_OPF 0x010 + +/* 000010010 - convert 16-bit 3-D address to blocked byte address */ +#define ARRAY16_OPF 0x012 + +/* 000010100 - convert 32-bit 3-D address to blocked byte address */ +#define ARRAY32_OPF 0x014 + +/* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ +#define BMASK_OPF 0x019 + +/* 001001100 - Permute bytes as specified by GSR.MASK */ +#define BSHUFFLE_OPF 0x04c + +#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) +#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) + +#define VIS_OPF_SHIFT 5
+#define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) + +#define RS1(INSN) (((INSN) >> 24) & 0x1f) +#define RS2(INSN) (((INSN) >> 0) & 0x1f) +#define RD(INSN) (((INSN) >> 25) & 0x1f) + +static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, + unsigned int rd, int from_kernel) +{ + if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { + if (from_kernel != 0) + __asm__ __volatile__("flushw"); + else + flushw_user(); + } +} + +static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) +{ + unsigned long value; + + if (reg < 16) + return (!reg ? 0 : regs->u_regs[reg]); + if (regs->tstate & TSTATE_PRIV) { + struct reg_window *win; + win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + value = win->locals[reg - 16]; + } else if (test_thread_flag(TIF_32BIT)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + get_user(value, &win32->locals[reg - 16]); + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + get_user(value, &win->locals[reg - 16]); + } + return value; +} + +static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, + struct pt_regs *regs) +{ + BUG_ON(reg < 16); + BUG_ON(regs->tstate & TSTATE_PRIV); + + if (test_thread_flag(TIF_32BIT)) { + struct reg_window32 __user *win32; + win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + return (unsigned long __user *)&win32->locals[reg - 16]; + } else { + struct reg_window __user *win; + win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + return &win->locals[reg - 16]; + } +} + +static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, + struct pt_regs *regs) +{ + BUG_ON(reg >= 16); + BUG_ON(regs->tstate & TSTATE_PRIV); + + return &regs->u_regs[reg]; +} + +static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) +{ + if (rd < 16) { + unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); + + *rd_kern = val; + } else { + unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); + + if (test_thread_flag(TIF_32BIT)) + __put_user((u32)val, (u32 __user *)rd_user); + else + __put_user(val, rd_user); + } +} + +static inline unsigned long fpd_regval(struct fpustate *f, + unsigned int insn_regnum) +{ + insn_regnum = (((insn_regnum & 1) << 5) | + (insn_regnum & 0x1e)); + + return *(unsigned long *) &f->regs[insn_regnum]; +} + +static inline unsigned long *fpd_regaddr(struct fpustate *f, + unsigned int insn_regnum) +{ + insn_regnum = (((insn_regnum & 1) << 5) | + (insn_regnum & 0x1e)); + + return (unsigned long *) &f->regs[insn_regnum]; +} + +static inline unsigned int fps_regval(struct fpustate *f, + unsigned int insn_regnum) +{ + return f->regs[insn_regnum]; +} + +static inline unsigned int *fps_regaddr(struct fpustate *f, + unsigned int insn_regnum) +{ + return &f->regs[insn_regnum]; +} + +struct edge_tab { + u16 left, right; +}; +struct edge_tab edge8_tab[8] = { + { 0xff, 0x80 }, + { 0x7f, 0xc0 }, + { 0x3f, 0xe0 }, + { 0x1f, 0xf0 }, + { 0x0f, 0xf8 }, + { 0x07, 0xfc }, + { 0x03, 0xfe }, + { 0x01, 0xff }, +}; +struct edge_tab edge8_tab_l[8] = { + { 0xff, 0x01 }, + { 0xfe, 0x03 }, + { 0xfc, 0x07 }, + { 0xf8, 0x0f }, + { 0xf0, 0x1f }, + { 0xe0, 0x3f }, + { 0xc0, 0x7f }, + { 0x80, 0xff }, +}; +struct edge_tab edge16_tab[4] = { + { 0xf, 0x8 }, + { 0x7, 0xc }, + { 0x3, 0xe }, + { 0x1, 0xf }, +}; +struct edge_tab edge16_tab_l[4] = { + { 0xf, 0x1 }, + { 0xe, 0x3 }, + { 0xc, 0x7 },
+ { 0x8, 0xf }, +}; +struct edge_tab edge32_tab[2] = { + { 0x3, 0x2 }, + { 0x1, 0x3 }, +}; +struct edge_tab edge32_tab_l[2] = { + { 0x3, 0x1 }, + { 0x2, 0x3 }, +}; + +static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; + u16 left, right; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); + orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); + + if (test_thread_flag(TIF_32BIT)) { + rs1 = rs1 & 0xffffffff; + rs2 = rs2 & 0xffffffff; + } + switch (opf) { + default: + case EDGE8_OPF: + case EDGE8N_OPF: + left = edge8_tab[rs1 & 0x7].left; + right = edge8_tab[rs2 & 0x7].right; + break; + case EDGE8L_OPF: + case EDGE8LN_OPF: + left = edge8_tab_l[rs1 & 0x7].left; + right = edge8_tab_l[rs2 & 0x7].right; + break; + + case EDGE16_OPF: + case EDGE16N_OPF: + left = edge16_tab[(rs1 >> 1) & 0x3].left; + right = edge16_tab[(rs2 >> 1) & 0x3].right; + break; + + case EDGE16L_OPF: + case EDGE16LN_OPF: + left = edge16_tab_l[(rs1 >> 1) & 0x3].left; + right = edge16_tab_l[(rs2 >> 1) & 0x3].right; + break; + + case EDGE32_OPF: + case EDGE32N_OPF: + left = edge32_tab[(rs1 >> 2) & 0x1].left; + right = edge32_tab[(rs2 >> 2) & 0x1].right; + break; + + case EDGE32L_OPF: + case EDGE32LN_OPF: + left = edge32_tab_l[(rs1 >> 2) & 0x1].left; + right = edge32_tab_l[(rs2 >> 2) & 0x1].right; + break; + }; + + if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) + rd_val = right & left; + else + rd_val = left; + + store_reg(regs, rd_val, RD(insn)); + + switch (opf) { + case EDGE8_OPF: + case EDGE8L_OPF: + case EDGE16_OPF: + case EDGE16L_OPF: + case EDGE32_OPF: + case EDGE32L_OPF: { + unsigned long ccr, tstate; + + __asm__ __volatile__("subcc %1, %2, %%g0\n\t" + "rd %%ccr, %0" + : "=r" (ccr) + : "r" (orig_rs1), "r" (orig_rs2) + : "cc"); + tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); + regs->tstate = tstate | (ccr << 32UL); + } + }; +} + +static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + unsigned long rs1, rs2, rd_val; + unsigned int bits, bits_mask; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + rs1 = fetch_reg(RS1(insn), regs); + rs2 = fetch_reg(RS2(insn), regs); + + bits = (rs2 > 5 ? 
5 : rs2); + bits_mask = (1UL << bits) - 1UL; + + rd_val = ((((rs1 >> 11) & 0x3) << 0) | + (((rs1 >> 33) & 0x3) << 2) | + (((rs1 >> 55) & 0x1) << 4) | + (((rs1 >> 13) & 0xf) << 5) | + (((rs1 >> 35) & 0xf) << 9) | + (((rs1 >> 56) & 0xf) << 13) | + (((rs1 >> 17) & bits_mask) << 17) | + (((rs1 >> 39) & bits_mask) << (17 + bits)) | + (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); + + switch (opf) { + case ARRAY16_OPF: + rd_val <<= 1; + break; + + case ARRAY32_OPF: + rd_val <<= 2; + }; + + store_reg(regs, rd_val, RD(insn)); +} + +static void bmask(struct pt_regs *regs, unsigned int insn) +{ + unsigned long rs1, rs2, rd_val, gsr; + + maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); + rs1 = fetch_reg(RS1(insn), regs); + rs2 = fetch_reg(RS2(insn), regs); + rd_val = rs1 + rs2; + + store_reg(regs, rd_val, RD(insn)); + + gsr = current_thread_info()->gsr[0] & 0xffffffff; + gsr |= rd_val << 32UL; + current_thread_info()->gsr[0] = gsr; +} + +static void bshuffle(struct pt_regs *regs, unsigned int insn) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val; + unsigned long bmask, i; + + bmask = current_thread_info()->gsr[0] >> 32UL; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0UL; + for (i = 0; i < 8; i++) { + unsigned long which = (bmask >> (i * 4)) & 0xf; + unsigned long byte; + + if (which < 8) + byte = (rs1 >> (which * 8)) & 0xff; + else + byte = (rs2 >> ((which-8)*8)) & 0xff; + rd_val |= (byte << (i * 8)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; +} + +static void pdist(struct pt_regs *regs, unsigned int insn) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, *rd, rd_val; + unsigned long i; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS1(insn)); + rd = fpd_regaddr(f, RD(insn)); + + rd_val = *rd; + + for (i = 0; i < 8; i++) { + s16 s1, s2; + + s1 = (rs1 >> (56 - (i * 8))) & 0xff; + s2 = (rs2 >> (56 - (i * 8))) & 0xff; + + /* Absolute value of difference. */ + s1 -= s2; + if (s1 < 0) + s1 = ~s1 + 1; + + rd_val += s1; + } + + *rd = rd_val; +} + +static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, gsr, scale, rd_val; + + gsr = current_thread_info()->gsr[0]; + scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); + switch (opf) { + case FPACK16_OPF: { + unsigned long byte; + + rs2 = fpd_regval(f, RS2(insn)); + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + unsigned int val; + s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; + int scaled = src << scale; + int from_fixed = scaled >> 7; + + val = ((from_fixed < 0) ? + 0 : + (from_fixed > 255) ? + 255 : from_fixed); + + rd_val |= (val << (8 * byte)); + } + *fps_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPACK32_OPF: { + unsigned long word; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); + for (word = 0; word < 2; word++) { + unsigned long val; + s32 src = (rs2 >> (word * 32UL)); + s64 scaled = src << scale; + s64 from_fixed = scaled >> 23; + + val = ((from_fixed < 0) ? + 0 : + (from_fixed > 255) ? + 255 : from_fixed); + + rd_val |= (val << (32 * word)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPACKFIX_OPF: { + unsigned long word; + + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + for (word = 0; word < 2; word++) { + long val; + s32 src = (rs2 >> (word * 32UL)); + s64 scaled = src << scale; + s64 from_fixed = scaled >> 16; + + val = ((from_fixed < -32768) ? 
+ -32768 : + (from_fixed > 32767) ? + 32767 : from_fixed); + + rd_val |= ((val & 0xffff) << (word * 16)); + } + *fps_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FEXPAND_OPF: { + unsigned long byte; + + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + unsigned long val; + u8 src = (rs2 >> (byte * 8)) & 0xff; + + val = src << 4; + + rd_val |= (val << (byte * 16)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FPMERGE_OPF: { + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = (((rs2 & 0x000000ff) << 0) | + ((rs1 & 0x000000ff) << 8) | + ((rs2 & 0x0000ff00) << 8) | + ((rs1 & 0x0000ff00) << 16) | + ((rs2 & 0x00ff0000) << 16) | + ((rs1 & 0x00ff0000) << 24) | + ((rs2 & 0xff000000) << 24) | + ((rs1 & 0xff000000) << 32)); + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + }; +} + +static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val; + + switch (opf) { + case FMUL8x16_OPF: { + unsigned long byte; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + for (byte = 0; byte < 4; byte++) { + u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; + s16 src2 = (rs2 >> (byte * 16)) & 0xffff; + u32 prod = src1 * src2; + u16 scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMUL8x16AU_OPF: + case FMUL8x16AL_OPF: { + unsigned long byte; + s16 src2; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0); + for (byte = 0; byte < 4; byte++) { + u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; + u32 prod = src1 * src2; + u16 scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMUL8SUx16_OPF: + case FMUL8ULx16_OPF: { + unsigned long byte, ushift; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; + for (byte = 0; byte < 4; byte++) { + u16 src1; + s16 src2; + u32 prod; + u16 scaled; + + src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); + src2 = ((rs2 >> (16 * byte)) & 0xffff); + prod = src1 * src2; + scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. */ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); + } + + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + + case FMULD8SUx16_OPF: + case FMULD8ULx16_OPF: { + unsigned long byte, ushift; + + rs1 = fps_regval(f, RS1(insn)); + rs2 = fps_regval(f, RS2(insn)); + + rd_val = 0; + ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; + for (byte = 0; byte < 2; byte++) { + u16 src1; + s16 src2; + u32 prod; + u16 scaled; + + src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); + src2 = ((rs2 >> (16 * byte)) & 0xffff); + prod = src1 * src2; + scaled = ((prod & 0x00ffff00) >> 8); + + /* Round up. 
*/ + if (prod & 0x80) + scaled++; + rd_val |= ((scaled & 0xffffUL) << + ((byte * 32UL) + 7UL)); + } + *fpd_regaddr(f, RD(insn)) = rd_val; + break; + } + }; +} + +static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) +{ + struct fpustate *f = FPUSTATE; + unsigned long rs1, rs2, rd_val, i; + + rs1 = fpd_regval(f, RS1(insn)); + rs2 = fpd_regval(f, RS2(insn)); + + rd_val = 0; + + switch (opf) { + case FCMPGT16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a > b) + rd_val |= 1 << i; + } + break; + + case FCMPGT32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffff; + s32 b = (rs2 >> (i * 32)) & 0xffff; + + if (a > b) + rd_val |= 1 << i; + } + break; + + case FCMPLE16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a <= b) + rd_val |= 1 << i; + } + break; + + case FCMPLE32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffff; + s32 b = (rs2 >> (i * 32)) & 0xffff; + + if (a <= b) + rd_val |= 1 << i; + } + break; + + case FCMPNE16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a != b) + rd_val |= 1 << i; + } + break; + + case FCMPNE32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffff; + s32 b = (rs2 >> (i * 32)) & 0xffff; + + if (a != b) + rd_val |= 1 << i; + } + break; + + case FCMPEQ16_OPF: + for (i = 0; i < 4; i++) { + s16 a = (rs1 >> (i * 16)) & 0xffff; + s16 b = (rs2 >> (i * 16)) & 0xffff; + + if (a == b) + rd_val |= 1 << i; + } + break; + + case FCMPEQ32_OPF: + for (i = 0; i < 2; i++) { + s32 a = (rs1 >> (i * 32)) & 0xffff; + s32 b = (rs2 >> (i * 32)) & 0xffff; + + if (a == b) + rd_val |= 1 << i; + } + break; + }; + + maybe_flush_windows(0, 0, RD(insn), 0); + store_reg(regs, rd_val, RD(insn)); +} + +/* Emulate the VIS instructions which are not implemented in + * hardware on Niagara. + */ +int vis_emul(struct pt_regs *regs, unsigned int insn) +{ + unsigned long pc = regs->tpc; + unsigned int opf; + + BUG_ON(regs->tstate & TSTATE_PRIV); + + if (test_thread_flag(TIF_32BIT)) + pc = (u32)pc; + + if (get_user(insn, (u32 __user *) pc)) + return -EFAULT; + + if ((insn & VIS_OPCODE_MASK) != VIS_OPCODE_VAL) + return -EINVAL; + + opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; + switch (opf) { + default: + return -EINVAL; + + /* Pixel Formatting Instructions. 
*/
+	case FPACK16_OPF:
+	case FPACK32_OPF:
+	case FPACKFIX_OPF:
+	case FEXPAND_OPF:
+	case FPMERGE_OPF:
+		pformat(regs, insn, opf);
+		break;
+
+	/* Partitioned Multiply Instructions */
+	case FMUL8x16_OPF:
+	case FMUL8x16AU_OPF:
+	case FMUL8x16AL_OPF:
+	case FMUL8SUx16_OPF:
+	case FMUL8ULx16_OPF:
+	case FMULD8SUx16_OPF:
+	case FMULD8ULx16_OPF:
+		pmul(regs, insn, opf);
+		break;
+
+	/* Pixel Compare Instructions */
+	case FCMPGT16_OPF:
+	case FCMPGT32_OPF:
+	case FCMPLE16_OPF:
+	case FCMPLE32_OPF:
+	case FCMPNE16_OPF:
+	case FCMPNE32_OPF:
+	case FCMPEQ16_OPF:
+	case FCMPEQ32_OPF:
+		pcmp(regs, insn, opf);
+		break;
+
+	/* Edge Handling Instructions */
+	case EDGE8_OPF:
+	case EDGE8N_OPF:
+	case EDGE8L_OPF:
+	case EDGE8LN_OPF:
+	case EDGE16_OPF:
+	case EDGE16N_OPF:
+	case EDGE16L_OPF:
+	case EDGE16LN_OPF:
+	case EDGE32_OPF:
+	case EDGE32N_OPF:
+	case EDGE32L_OPF:
+	case EDGE32LN_OPF:
+		edge(regs, insn, opf);
+		break;
+
+	/* Pixel Component Distance */
+	case PDIST_OPF:
+		pdist(regs, insn);
+		break;
+
+	/* Three-Dimensional Array Addressing Instructions */
+	case ARRAY8_OPF:
+	case ARRAY16_OPF:
+	case ARRAY32_OPF:
+		array(regs, insn, opf);
+		break;
+
+	/* Byte Mask and Shuffle Instructions */
+	case BMASK_OPF:
+		bmask(regs, insn);
+		break;
+
+	case BSHUFFLE_OPF:
+		bshuffle(regs, insn);
+		break;
+	};
+
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	return 0;
+}
-- cgit v1.2.3


From 7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 Mon Sep 17 00:00:00 2001
From: "David S. Miller" 
Date: Thu, 16 Mar 2006 02:02:32 -0800
Subject: [SPARC64]: Fix and re-enable dynamic TSB sizing.

This is good for up to a 50% performance improvement in some test cases.

The problem has been the race conditions, and hopefully I've plugged them
all up here.

1) There was a serious race in switch_mm() wrt. lazy TLB
   switching to and from kernel threads.  We could erroneously skip a
   tsb_context_switch() and thus use a stale TSB across a TSB grow
   event.  There is a big comment now in that function describing
   exactly how it can happen.

2) All code paths that do something with the TSB need to be guarded
   with the mm->context.lock spinlock.  This makes page table flushing
   paths properly synchronize with both TSB growing and TLB context
   changes.

3) TSB growing events are moved to the end of successful fault
   processing.  Previously it was in update_mmu_cache() but that is
   deadlock prone.  At the end of do_sparc64_fault() we hold no
   spinlocks that could deadlock the TSB grow sequence.  We also have
   dropped the address space semaphore.

While we're here, add prefetching to the copy_tsb() routine and put it
in assembler into the tsb.S file.  This piece of code is quite time
critical.

There are some small negative side effects to this code which can be
improved upon.  In particular we grab the mm->context.lock even for
the tsb insert done by update_mmu_cache() now and that's a bit
excessive.  We can get rid of that locking, and the same lock taking
in flush_tsb_user(), by disabling PSTATE_IE around the whole operation
including the capturing of the tsb pointer and tsb_nentries value.
That would work because anyone growing the TSB won't free up the old
TSB until all cpus respond to the TSB change cross call.

I'm not quite confident enough in that optimization to put it in right
now, but eventually we might be able to, and the description is here
for reference.

This code seems very solid now.  It passes several parallel GCC
bootstrap builds, and our favorite "nut cruncher" stress test which is
a full "make -j8192" build of a "make allmodconfig" kernel. 
That puts about 256 processes on each cpu's run queue, makes lots of process cpu migrations occur, causes lots of page table and TLB flushing activity, incurs many context version number changes, and it swaps the machine real far out to disk even though there is 16GB of ram on this test system. :-) Signed-off-by: David S. Miller --- arch/sparc64/kernel/tsb.S | 71 +++++++++++++++++- arch/sparc64/mm/fault.c | 8 +- arch/sparc64/mm/init.c | 7 +- arch/sparc64/mm/tsb.c | 185 ++++++++++++++++++++-------------------------- 4 files changed, 165 insertions(+), 106 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S index d738910153f..1b154c86362 100644 --- a/arch/sparc64/kernel/tsb.S +++ b/arch/sparc64/kernel/tsb.S @@ -34,8 +34,9 @@ tsb_miss_itlb: ldxa [%g4] ASI_IMMU, %g4 /* At this point we have: - * %g4 -- missing virtual address * %g1 -- TSB entry address + * %g3 -- FAULT_CODE_{D,I}TLB + * %g4 -- missing virtual address * %g6 -- TAG TARGET (vaddr >> 22) */ tsb_miss_page_table_walk: @@ -45,6 +46,12 @@ tsb_miss_page_table_walk: tsb_miss_page_table_walk_sun4v_fastpath: USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) + /* At this point we have: + * %g1 -- TSB entry address + * %g3 -- FAULT_CODE_{D,I}TLB + * %g5 -- physical address of PTE in Linux page tables + * %g6 -- TAG TARGET (vaddr >> 22) + */ tsb_reload: TSB_LOCK_TAG(%g1, %g2, %g7) @@ -199,6 +206,7 @@ __tsb_insert: wrpr %o5, %pstate retl nop + .size __tsb_insert, .-__tsb_insert /* Flush the given TSB entry if it has the matching * tag. @@ -208,6 +216,7 @@ __tsb_insert: */ .align 32 .globl tsb_flush + .type tsb_flush,#function tsb_flush: sethi %hi(TSB_TAG_LOCK_HIGH), %g2 1: TSB_LOAD_TAG(%o0, %g1) @@ -225,6 +234,7 @@ tsb_flush: nop 2: retl TSB_MEMBAR + .size tsb_flush, .-tsb_flush /* Reload MMU related context switch state at * schedule() time. @@ -241,6 +251,7 @@ tsb_flush: */ .align 32 .globl __tsb_context_switch + .type __tsb_context_switch,#function __tsb_context_switch: rdpr %pstate, %o5 wrpr %o5, PSTATE_IE, %pstate @@ -302,3 +313,61 @@ __tsb_context_switch: retl nop + .size __tsb_context_switch, .-__tsb_context_switch + +#define TSB_PASS_BITS ((1 << TSB_TAG_LOCK_BIT) | \ + (1 << TSB_TAG_INVALID_BIT)) + + .align 32 + .globl copy_tsb + .type copy_tsb,#function +copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size + * %o2=new_tsb_base, %o3=new_tsb_size + */ + sethi %uhi(TSB_PASS_BITS), %g7 + srlx %o3, 4, %o3 + add %o0, %o1, %g1 /* end of old tsb */ + sllx %g7, 32, %g7 + sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ + +661: prefetcha [%o0] ASI_N, #one_read + .section .tsb_phys_patch, "ax" + .word 661b + prefetcha [%o0] ASI_PHYS_USE_EC, #one_read + .previous + +90: andcc %o0, (64 - 1), %g0 + bne 1f + add %o0, 64, %o5 + +661: prefetcha [%o5] ASI_N, #one_read + .section .tsb_phys_patch, "ax" + .word 661b + prefetcha [%o5] ASI_PHYS_USE_EC, #one_read + .previous + +1: TSB_LOAD_QUAD(%o0, %g2) /* %g2/%g3 == TSB entry */ + andcc %g2, %g7, %g0 /* LOCK or INVALID set? */ + bne,pn %xcc, 80f /* Skip it */ + sllx %g2, 22, %o4 /* TAG --> VADDR */ + + /* This can definitely be computed faster... */ + srlx %o0, 4, %o5 /* Build index */ + and %o5, 511, %o5 /* Mask index */ + sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ + or %o4, %o5, %o4 /* Full VADDR. 
*/ + srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ + and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ + sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ + TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ + add %o4, 0x8, %o4 /* Advance to TTE */ + TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ + +80: add %o0, 16, %o0 + cmp %o0, %g1 + bne,pt %xcc, 90b + nop + + retl + TSB_MEMBAR + .size copy_tsb, .-copy_tsb diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index b97bd054aad..63b6cc0cd5d 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -29,6 +29,7 @@ #include #include #include +#include /* * To debug kernel to catch accesses to certain virtual/physical addresses. @@ -258,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) struct vm_area_struct *vma; unsigned int insn = 0; int si_code, fault_code; - unsigned long address; + unsigned long address, mm_rss; fault_code = get_thread_fault_code(); @@ -407,6 +408,11 @@ good_area: } up_read(&mm->mmap_sem); + + mm_rss = get_mm_rss(mm); + if (unlikely(mm_rss >= mm->context.tsb_rss_limit)) + tsb_grow(mm, mm_rss); + return; /* diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b40f6477dea..d703b67bc7b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -279,7 +279,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p { struct mm_struct *mm; struct tsb *tsb; - unsigned long tag; + unsigned long tag, flags; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,10 +308,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; + + spin_lock_irqsave(&mm->context.lock, flags); + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); + + spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index f36799b7152..7fbe1e0cd10 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -48,11 +48,15 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) void flush_tsb_user(struct mmu_gather *mp) { struct mm_struct *mm = mp->mm; - struct tsb *tsb = mm->context.tsb; - unsigned long nentries = mm->context.tsb_nentries; - unsigned long base; + unsigned long nentries, base, flags; + struct tsb *tsb; int i; + spin_lock_irqsave(&mm->context.lock, flags); + + tsb = mm->context.tsb; + nentries = mm->context.tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else @@ -70,6 +74,8 @@ void flush_tsb_user(struct mmu_gather *mp) tsb_flush(ent, tag); } + + spin_unlock_irqrestore(&mm->context.lock, flags); } static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) @@ -201,86 +207,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) } } -/* The page tables are locked against modifications while this - * runs. - * - * XXX do some prefetching... 
- */ -static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, - struct tsb *new_tsb, unsigned long new_size) -{ - unsigned long old_nentries = old_size / sizeof(struct tsb); - unsigned long new_nentries = new_size / sizeof(struct tsb); - unsigned long i; - - for (i = 0; i < old_nentries; i++) { - register unsigned long tag asm("o4"); - register unsigned long pte asm("o5"); - unsigned long v, hash; - - if (tlb_type == hypervisor) { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (__pa(&old_tsb[i])), - "i" (ASI_QUAD_LDD_PHYS_4V)); - } else if (tlb_type == cheetah_plus) { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (__pa(&old_tsb[i])), - "i" (ASI_QUAD_LDD_PHYS)); - } else { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (&old_tsb[i]), - "i" (ASI_NUCLEUS_QUAD_LDD)); - } - - if (tag & ((1UL << TSB_TAG_LOCK_BIT) | - (1UL << TSB_TAG_INVALID_BIT))) - continue; - - /* We only put base page size PTEs into the TSB, - * but that might change in the future. This code - * would need to be changed if we start putting larger - * page size PTEs into there. - */ - WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS); - - /* The tag holds bits 22 to 63 of the virtual address - * and the context. Clear out the context, and shift - * up to make a virtual address. - */ - v = (tag & ((1UL << 42UL) - 1UL)) << 22UL; - - /* The implied bits of the tag (bits 13 to 21) are - * determined by the TSB entry index, so fill that in. - */ - v |= (i & (512UL - 1UL)) << 13UL; - - hash = tsb_hash(v, new_nentries); - if (tlb_type == cheetah_plus || - tlb_type == hypervisor) { - __asm__ __volatile__( - "stxa %0, [%1] %2\n\t" - "stxa %3, [%4] %2" - : /* no outputs */ - : "r" (tag), - "r" (__pa(&new_tsb[hash].tag)), - "i" (ASI_PHYS_USE_EC), - "r" (pte), - "r" (__pa(&new_tsb[hash].pte))); - } else { - new_tsb[hash].tag = tag; - new_tsb[hash].pte = pte; - } - } -} - /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, - * update_mmu_cache() invokes this routine to try and grow the TSB. + * do_sparc64_fault() invokes this routine to try and grow the TSB. + * * When we reach the maximum TSB size supported, we stick ~0UL into * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache() * will not trigger any longer. @@ -293,12 +222,12 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, * the number of entries that the current TSB can hold at once. Currently, * we trigger when the RSS hits 3/4 of the TSB capacity. */ -void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) +void tsb_grow(struct mm_struct *mm, unsigned long rss) { unsigned long max_tsb_size = 1 * 1024 * 1024; - unsigned long size, old_size; + unsigned long size, old_size, flags; struct page *page; - struct tsb *old_tsb; + struct tsb *old_tsb, *new_tsb; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); @@ -311,12 +240,51 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) break; } - page = alloc_pages(gfp_flags, get_order(size)); + page = alloc_pages(GFP_KERNEL, get_order(size)); if (unlikely(!page)) return; /* Mark all tags as invalid. */ - memset(page_address(page), 0x40, size); + new_tsb = page_address(page); + memset(new_tsb, 0x40, size); + + /* Ok, we are about to commit the changes. If we are + * growing an existing TSB the locking is very tricky, + * so WATCH OUT! 
+ * + * We have to hold mm->context.lock while committing to the + * new TSB, this synchronizes us with processors in + * flush_tsb_user() and switch_mm() for this address space. + * + * But even with that lock held, processors run asynchronously + * accessing the old TSB via TLB miss handling. This is OK + * because those actions are just propagating state from the + * Linux page tables into the TSB, page table mappings are not + * being changed. If a real fault occurs, the processor will + * synchronize with us when it hits flush_tsb_user(), this is + * also true for the case where vmscan is modifying the page + * tables. The only thing we need to be careful with is to + * skip any locked TSB entries during copy_tsb(). + * + * When we finish committing to the new TSB, we have to drop + * the lock and ask all other cpus running this address space + * to run tsb_context_switch() to see the new TSB table. + */ + spin_lock_irqsave(&mm->context.lock, flags); + + old_tsb = mm->context.tsb; + old_size = mm->context.tsb_nentries * sizeof(struct tsb); + + /* Handle multiple threads trying to grow the TSB at the same time. + * One will get in here first, and bump the size and the RSS limit. + * The others will get in here next and hit this check. + */ + if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { + spin_unlock_irqrestore(&mm->context.lock, flags); + + free_pages((unsigned long) new_tsb, get_order(size)); + return; + } if (size == max_tsb_size) mm->context.tsb_rss_limit = ~0UL; @@ -324,30 +292,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) mm->context.tsb_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; - old_tsb = mm->context.tsb; - old_size = mm->context.tsb_nentries * sizeof(struct tsb); - - if (old_tsb) - copy_tsb(old_tsb, old_size, page_address(page), size); + if (old_tsb) { + extern void copy_tsb(unsigned long old_tsb_base, + unsigned long old_tsb_size, + unsigned long new_tsb_base, + unsigned long new_tsb_size); + unsigned long old_tsb_base = (unsigned long) old_tsb; + unsigned long new_tsb_base = (unsigned long) new_tsb; + + if (tlb_type == cheetah_plus || tlb_type == hypervisor) { + old_tsb_base = __pa(old_tsb_base); + new_tsb_base = __pa(new_tsb_base); + } + copy_tsb(old_tsb_base, old_size, new_tsb_base, size); + } - mm->context.tsb = page_address(page); + mm->context.tsb = new_tsb; setup_tsb_params(mm, size); + spin_unlock_irqrestore(&mm->context.lock, flags); + /* If old_tsb is NULL, we're being invoked for the first time * from init_new_context(). */ if (old_tsb) { - /* Now force all other processors to reload the new - * TSB state. - */ - smp_tsb_sync(mm); - - /* Finally reload it on the local cpu. No further - * references will remain to the old TSB and we can - * thus free it up. - */ + /* Reload it on the local cpu. */ tsb_context_switch(mm); + /* Now force other processors to do the same. */ + smp_tsb_sync(mm); + + /* Now it is safe to free the old tsb. */ free_pages((unsigned long) old_tsb, get_order(old_size)); } } @@ -363,7 +338,11 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) * will be confused and think there is an older TSB to free up. */ mm->context.tsb = NULL; - tsb_grow(mm, 0, GFP_KERNEL); + + /* If this is fork, inherit the parent's TSB size. We would + * grow it to that size on the first page fault anyways. + */ + tsb_grow(mm, get_mm_rss(mm)); if (unlikely(!mm->context.tsb)) return -ENOMEM; -- cgit v1.2.3 From a91690ddd05ab0b7fbdd37733875525ac75c20f2 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Fri, 17 Mar 2006 14:41:03 -0800 Subject: [SPARC64]: Top-down address space allocation for 32-bit tasks. Currently allocations are very constrained for 32-bit processes. It grows down-up from 0x70000000 to 0xf0000000 which gives about 2GB of stack + dynamic mmap() space. So support the top-down method, and we need to override the generic helper function in order to deal with D-cache coloring. With these changes I was able to squeeze out a mmap() just over 3.6GB in size in a 32-bit process. Signed-off-by: David S. Miller --- arch/sparc64/kernel/binfmt_aout32.c | 2 + arch/sparc64/kernel/sys_sparc.c | 190 ++++++++++++++++++++++++++++++++++-- 2 files changed, 183 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c index cb9ecd0172c..d7caa60a007 100644 --- a/arch/sparc64/kernel/binfmt_aout32.c +++ b/arch/sparc64/kernel/binfmt_aout32.c @@ -239,6 +239,8 @@ static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); + current->mm->free_area_cache = current->mm->mmap_base; + current->mm->cached_hole_size = 0; current->mm->mmap = NULL; compute_creds(bprm); diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 8840415408b..61dffb9349b 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -82,9 +82,34 @@ static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end return 1; } -#define COLOUR_ALIGN(addr,pgoff) \ - ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ - (((pgoff)< task_size || len >= VA_EXCLUDE_START) + if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) return -ENOMEM; do_color_align = 0; @@ -125,11 +150,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi return addr; } - if (len <= mm->cached_hole_size) { + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; - mm->free_area_cache = TASK_UNMAPPED_BASE; } - start_addr = addr = mm->free_area_cache; task_size -= len; @@ -146,7 +172,7 @@ full_search: addr = VA_EXCLUDE_END; vma = find_vma(mm, VA_EXCLUDE_END); } - if (task_size < addr) { + if (unlikely(task_size < addr)) { if (start_addr != TASK_UNMAPPED_BASE) { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; @@ -154,7 +180,7 @@ full_search: } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (likely(!vma || addr + len <= vma->vm_start)) { /* * Remember the place where we stopped the search: */ @@ -170,6 +196,121 @@ full_search: } } +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long task_size = 0xf0000000UL; + unsigned long addr = addr0; + int do_color_align; + + /* This should only ever run for 32-bit processes. */ + BUG_ON(!test_thread_flag(TIF_32BIT)); + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
+ */ + if ((flags & MAP_SHARED) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + if (unlikely(len > task_size)) + return -ENOMEM; + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + + /* requesting a specific address */ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + if (do_color_align) { + unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff); + + addr = base + len; + } + + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + } + + if (unlikely(mm->mmap_base < len)) + goto bottomup; + + addr = mm->mmap_base-len; + if (do_color_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (likely(!vma || addr+len <= vma->vm_start)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; + if (do_color_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); + } while (likely(len < vma->vm_start)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; +} + /* Try to align mapping such that we align it as much as possible. */ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) { @@ -213,6 +354,37 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u return addr; } +/* Essentially the same as PowerPC... 
*/ +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + /* + * Fall back to the standard layout if the personality + * bit is set, or if the expected stack growth is unlimited: + */ + if (!test_thread_flag(TIF_32BIT) || + (current->personality & ADDR_COMPAT_LAYOUT) || + current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + /* We know it's 32-bit */ + unsigned long task_size = 0xf0000000UL; + unsigned long gap; + + gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; + if (gap < 128 * 1024 * 1024) + gap = 128 * 1024 * 1024; + if (gap > (task_size / 6 * 5)) + gap = (task_size / 6 * 5); + + mm->mmap_base = task_size - (gap & PAGE_MASK); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +} + asmlinkage unsigned long sparc_brk(unsigned long brk) { /* People could try to be nasty and use ta 0x6d in 32bit programs */ -- cgit v1.2.3 From d61e16df940e02e25679bdc1aee8c25786f6de90 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 17:33:56 -0800 Subject: [SPARC64]: Increase top of 32-bit process stack. Put it one page below the top of the 32-bit address space. This gives us ~16MB more address space to work with. Signed-off-by: David S. Miller --- arch/sparc64/kernel/binfmt_elf32.c | 4 +++- arch/sparc64/kernel/sys_sparc.c | 13 +++++++------ arch/sparc64/kernel/sys_sparc32.c | 9 +++++---- arch/sparc64/solaris/misc.c | 4 ++-- 4 files changed, 17 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c index a1a12d2aa35..8a2abcce273 100644 --- a/arch/sparc64/kernel/binfmt_elf32.c +++ b/arch/sparc64/kernel/binfmt_elf32.c @@ -153,7 +153,9 @@ MODULE_AUTHOR("Eric Youngdale, David S. 
Miller, Jakub Jelinek"); #undef MODULE_DESCRIPTION #undef MODULE_AUTHOR +#include + #undef TASK_SIZE -#define TASK_SIZE 0xf0000000 +#define TASK_SIZE STACK_TOP32 #include "../../../fs/binfmt_elf.c" diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 61dffb9349b..9019b41fc02 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -30,6 +30,7 @@ #include #include #include +#include /* #define DEBUG_UNIMP_SYSCALL */ @@ -130,7 +131,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi } if (test_thread_flag(TIF_32BIT)) - task_size = 0xf0000000UL; + task_size = STACK_TOP32; if (unlikely(len > task_size || len >= VA_EXCLUDE_START)) return -ENOMEM; @@ -203,7 +204,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long task_size = 0xf0000000UL; + unsigned long task_size = STACK_TOP32; unsigned long addr = addr0; int do_color_align; @@ -370,7 +371,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->unmap_area = arch_unmap_area; } else { /* We know it's 32-bit */ - unsigned long task_size = 0xf0000000UL; + unsigned long task_size = STACK_TOP32; unsigned long gap; gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; @@ -388,7 +389,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) asmlinkage unsigned long sparc_brk(unsigned long brk) { /* People could try to be nasty and use ta 0x6d in 32bit programs */ - if (test_thread_flag(TIF_32BIT) && brk >= 0xf0000000UL) + if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32) return current->mm->brk; if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) @@ -554,10 +555,10 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, retval = -EINVAL; if (test_thread_flag(TIF_32BIT)) { - if (len >= 0xf0000000UL) + if (len >= STACK_TOP32) goto out_putf; - if ((flags & MAP_FIXED) && addr > 0xf0000000UL - len) + if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len) goto out_putf; } else { if (len >= VA_EXCLUDE_START) diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 417727bd87b..0e41df02448 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c @@ -62,6 +62,7 @@ #include #include #include +#include asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group) { @@ -1039,15 +1040,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr, unsigned long ret = -EINVAL; unsigned long new_addr = __new_addr; - if (old_len > 0xf0000000UL || new_len > 0xf0000000UL) + if (old_len > STACK_TOP32 || new_len > STACK_TOP32) goto out; - if (addr > 0xf0000000UL - old_len) + if (addr > STACK_TOP32 - old_len) goto out; down_write(¤t->mm->mmap_sem); if (flags & MREMAP_FIXED) { - if (new_addr > 0xf0000000UL - new_len) + if (new_addr > STACK_TOP32 - new_len) goto out_sem; - } else if (addr > 0xf0000000UL - new_len) { + } else if (addr > STACK_TOP32 - new_len) { unsigned long map_flags = 0; struct file *file = NULL; diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 3ab4677395f..5284996780a 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c @@ -90,7 +90,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o len = PAGE_ALIGN(len); if(!(flags & MAP_FIXED)) addr = 0; - else if (len > 0xf0000000UL || addr > 0xf0000000UL - len) + else if (len > STACK_TOP32 || addr > STACK_TOP32 - len) goto 
out_putf; ret_type = flags & _MAP_NEW; flags &= ~_MAP_NEW; @@ -102,7 +102,7 @@ static u32 do_solaris_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u64 o (unsigned long) prot, (unsigned long) flags, off); up_write(¤t->mm->mmap_sem); if(!ret_type) - retval = ((retval < 0xf0000000) ? 0 : retval); + retval = ((retval < STACK_TOP32) ? 0 : retval); out_putf: if (file) -- cgit v1.2.3 From 05f9ca83596c7801549a2b4eba469d51baf5480f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 17:42:57 -0800 Subject: [SPARC64]: Randomize mm->mmap_base when PF_RANDOMIZE is set. Signed-off-by: David S. Miller --- arch/sparc64/kernel/sys_sparc.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index 9019b41fc02..7a869138c37 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -358,6 +359,17 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u /* Essentially the same as PowerPC... */ void arch_pick_mmap_layout(struct mm_struct *mm) { + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) { + random_factor = get_random_int(); + if (test_thread_flag(TIF_32BIT)) + random_factor &= ((1 * 1024 * 1024) - 1); + else + random_factor = ((random_factor << PAGE_SHIFT) & + 0xffffffffUL); + } + /* * Fall back to the standard layout if the personality * bit is set, or if the expected stack growth is unlimited: @@ -366,7 +378,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) (current->personality & ADDR_COMPAT_LAYOUT) || current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || sysctl_legacy_va_layout) { - mm->mmap_base = TASK_UNMAPPED_BASE; + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { @@ -380,7 +392,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (gap > (task_size / 6 * 5)) gap = (task_size / 6 * 5); - mm->mmap_base = task_size - (gap & PAGE_MASK); + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } -- cgit v1.2.3 From b52439c22c63dbbefd5395f2151c0ef4f667e949 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 23:40:47 -0800 Subject: [SPARC64]: Don't kill the page allocator when growing a TSB. Try only lightly on > 1 order allocations. If a grow fails, we are under memory pressure, so do not try to grow the TSB for this address space any more. If a > 0 order TSB allocation fails on a new fork, retry using a 0 order allocation. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 7fbe1e0cd10..3eb8670282f 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -216,7 +216,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) * * The TSB can be anywhere from 8K to 1MB in size, in increasing powers * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB - * must be 512K aligned. + * must be 512K aligned. It also must be physically contiguous, so we + * cannot use vmalloc(). 
* * The idea here is to grow the TSB when the RSS of the process approaches * the number of entries that the current TSB can hold at once. Currently, @@ -228,6 +229,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) unsigned long size, old_size, flags; struct page *page; struct tsb *old_tsb, *new_tsb; + unsigned long order, new_rss_limit; + gfp_t gfp_flags; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); @@ -240,9 +243,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) break; } - page = alloc_pages(GFP_KERNEL, get_order(size)); - if (unlikely(!page)) + if (size == max_tsb_size) + new_rss_limit = ~0UL; + else + new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; + +retry_page_alloc: + order = get_order(size); + gfp_flags = GFP_KERNEL; + if (order > 1) + gfp_flags = __GFP_NOWARN | __GFP_NORETRY; + + page = alloc_pages(gfp_flags, order); + if (unlikely(!page)) { + /* Not being able to fork due to a high-order TSB + * allocation failure is very bad behavior. Just back + * down to a 0-order allocation and force no TSB + * growing for this address space. + */ + if (mm->context.tsb == NULL && order > 0) { + size = PAGE_SIZE; + new_rss_limit = ~0UL; + goto retry_page_alloc; + } + + /* If we failed on a TSB grow, we are under serious + * memory pressure so don't try to grow any more. + */ + if (mm->context.tsb != NULL) + mm->context.tsb_rss_limit = ~0UL; return; + } /* Mark all tags as invalid. */ new_tsb = page_address(page); @@ -286,11 +317,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) return; } - if (size == max_tsb_size) - mm->context.tsb_rss_limit = ~0UL; - else - mm->context.tsb_rss_limit = - ((size / sizeof(struct tsb)) * 3) / 4; + mm->context.tsb_rss_limit = new_rss_limit; if (old_tsb) { extern void copy_tsb(unsigned long old_tsb_base, -- cgit v1.2.3 From 9b4006dcf6a8c43bd482b9c1ec576f0ed270ef23 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 18:12:42 -0800 Subject: [SPARC64]: Use SLAB caches for TSB tables. Signed-off-by: David S. 
Miller --- arch/sparc64/Kconfig | 3 ++ arch/sparc64/mm/init.c | 5 ++- arch/sparc64/mm/tsb.c | 86 ++++++++++++++++++++++++++++++++++++-------------- 3 files changed, 69 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 49b652f9b1d..c3685b314d7 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -192,6 +192,9 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y +config LARGE_ALLOCS + def_bool y + source "mm/Kconfig" config GENERIC_ISA_DMA diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index d703b67bc7b..a1a364e537c 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -165,6 +165,8 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) clear_page(addr); } +extern void tsb_cache_init(void); + void pgtable_cache_init(void) { pgtable_cache = kmem_cache_create("pgtable_cache", @@ -174,9 +176,10 @@ void pgtable_cache_init(void) zero_ctor, NULL); if (!pgtable_cache) { - prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_printf("Could not create pgtable_cache\n"); prom_halt(); } + tsb_cache_init(); } #ifdef CONFIG_DEBUG_DCFLUSH diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 3eb8670282f..1af797a0a09 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -11,6 +11,7 @@ #include #include #include +#include extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -207,6 +208,39 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) } } +static kmem_cache_t *tsb_caches[8] __read_mostly; + +static const char *tsb_cache_names[8] = { + "tsb_8KB", + "tsb_16KB", + "tsb_32KB", + "tsb_64KB", + "tsb_128KB", + "tsb_256KB", + "tsb_512KB", + "tsb_1MB", +}; + +void __init tsb_cache_init(void) +{ + unsigned long i; + + for (i = 0; i < 8; i++) { + unsigned long size = 8192 << i; + const char *name = tsb_cache_names[i]; + + tsb_caches[i] = kmem_cache_create(name, + size, size, + SLAB_HWCACHE_ALIGN | + SLAB_MUST_HWCACHE_ALIGN, + NULL, NULL); + if (!tsb_caches[i]) { + prom_printf("Could not create %s cache\n", name); + prom_halt(); + } + } +} + /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, * do_sparc64_fault() invokes this routine to try and grow the TSB. 
* @@ -226,45 +260,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) void tsb_grow(struct mm_struct *mm, unsigned long rss) { unsigned long max_tsb_size = 1 * 1024 * 1024; - unsigned long size, old_size, flags; - struct page *page; + unsigned long new_size, old_size, flags; struct tsb *old_tsb, *new_tsb; - unsigned long order, new_rss_limit; + unsigned long new_cache_index, old_cache_index; + unsigned long new_rss_limit; gfp_t gfp_flags; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); - for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) { - unsigned long n_entries = size / sizeof(struct tsb); + new_cache_index = 0; + for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { + unsigned long n_entries = new_size / sizeof(struct tsb); n_entries = (n_entries * 3) / 4; if (n_entries > rss) break; + + new_cache_index++; } - if (size == max_tsb_size) + if (new_size == max_tsb_size) new_rss_limit = ~0UL; else - new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; + new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4; -retry_page_alloc: - order = get_order(size); +retry_tsb_alloc: gfp_flags = GFP_KERNEL; - if (order > 1) + if (new_size > (PAGE_SIZE * 2)) gfp_flags = __GFP_NOWARN | __GFP_NORETRY; - page = alloc_pages(gfp_flags, order); - if (unlikely(!page)) { + new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags); + if (unlikely(!new_tsb)) { /* Not being able to fork due to a high-order TSB * allocation failure is very bad behavior. Just back * down to a 0-order allocation and force no TSB * growing for this address space. */ - if (mm->context.tsb == NULL && order > 0) { - size = PAGE_SIZE; + if (mm->context.tsb == NULL && new_cache_index > 0) { + new_cache_index = 0; + new_size = 8192; new_rss_limit = ~0UL; - goto retry_page_alloc; + goto retry_tsb_alloc; } /* If we failed on a TSB grow, we are under serious @@ -276,8 +313,7 @@ retry_page_alloc: } /* Mark all tags as invalid. */ - new_tsb = page_address(page); - memset(new_tsb, 0x40, size); + memset(new_tsb, 0x40, new_size); /* Ok, we are about to commit the changes. If we are * growing an existing TSB the locking is very tricky, @@ -304,8 +340,10 @@ retry_page_alloc: spin_lock_irqsave(&mm->context.lock, flags); old_tsb = mm->context.tsb; + old_cache_index = (mm->context.tsb_reg_val & 0x7UL); old_size = mm->context.tsb_nentries * sizeof(struct tsb); + /* Handle multiple threads trying to grow the TSB at the same time. * One will get in here first, and bump the size and the RSS limit. * The others will get in here next and hit this check. @@ -313,7 +351,7 @@ retry_page_alloc: if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { spin_unlock_irqrestore(&mm->context.lock, flags); - free_pages((unsigned long) new_tsb, get_order(size)); + kmem_cache_free(tsb_caches[new_cache_index], new_tsb); return; } @@ -331,11 +369,11 @@ retry_page_alloc: old_tsb_base = __pa(old_tsb_base); new_tsb_base = __pa(new_tsb_base); } - copy_tsb(old_tsb_base, old_size, new_tsb_base, size); + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); } mm->context.tsb = new_tsb; - setup_tsb_params(mm, size); + setup_tsb_params(mm, new_size); spin_unlock_irqrestore(&mm->context.lock, flags); @@ -350,7 +388,7 @@ retry_page_alloc: smp_tsb_sync(mm); /* Now it is safe to free the old tsb. 
*/
-		free_pages((unsigned long) old_tsb, get_order(old_size));
+		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
 	}
 }
 
@@ -379,10 +417,10 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 void destroy_context(struct mm_struct *mm)
 {
-	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
-	unsigned long flags;
+	unsigned long flags, cache_index;
 
-	free_pages((unsigned long) mm->context.tsb, get_order(size));
+	cache_index = (mm->context.tsb_reg_val & 0x7UL);
+	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
 
 	/* We can remove these later, but for now it's useful
 	 * to catch any bogus post-destroy_context() references
-- cgit v1.2.3


From 88d7079458f87d6f2d2261b2f87b7b9416019f5e Mon Sep 17 00:00:00 2001
From: "David S. Miller" 
Date: Sat, 18 Mar 2006 19:16:23 -0800
Subject: [SPARC64]: Allow CONFIG_MEMORY_HOTPLUG to build.

online_page() is straightforward, and then add a dummy remove_memory()
that returns -EINVAL just like i386.

There is no point in implementing remove_memory() since
__remove_pages() has no implementation either.

Signed-off-by: David S. Miller 
---
 arch/sparc64/mm/init.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'arch')

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a1a364e537c..c2b556106fc 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1805,3 +1805,21 @@ void __flush_tlb_all(void)
 	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
 			     : : "r" (pstate));
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+void online_page(struct page *page)
+{
+	ClearPageReserved(page);
+	set_page_count(page, 0);
+	free_cold_page(page);
+	totalram_pages++;
+	num_physpages++;
+}
+
+int remove_memory(u64 start, u64 size)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
-- cgit v1.2.3


From bb8646d8340fa7c1b66a037428e39f85f8738f0a Mon Sep 17 00:00:00 2001
From: "David S. Miller" 
Date: Sat, 18 Mar 2006 23:55:11 -0800
Subject: [SPARC64]: Optimized TSB table initialization.

We only need to write an invalid tag every 16 bytes, so taking
advantage of this can save many instructions compared to the simple
memset() call we make now.

A prefetching version is implemented for sun4u, and a block-init store
version is implemented for Niagara.

The next trick is to be able to perform an init and a copy_tsb() in
parallel when growing a TSB table.

Signed-off-by: David S. Miller 
---
 arch/sparc64/kernel/tsb.S  | 69 ++++++++++++++++++++++++++++++++++++++++++++++
 arch/sparc64/lib/NGbzero.S |  1 +
 arch/sparc64/mm/tsb.c      |  2 +-
 3 files changed, 71 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 1b154c86362..118baea44f6 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -371,3 +371,72 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 	retl
 	 TSB_MEMBAR
 	.size		copy_tsb, .-copy_tsb
+
+	/* Set the invalid bit in all TSB entries. 
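Each 16-byte entry dies with a single store of the invalid tag word, so the loops below touch only every 16th byte instead of memset()ing the whole table.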
*/ + .align 32 + .globl tsb_init + .type tsb_init,#function +tsb_init: /* %o0 = TSB vaddr, %o1 = size in bytes */ + prefetch [%o0 + 0x000], #n_writes + mov 1, %g1 + prefetch [%o0 + 0x040], #n_writes + sllx %g1, TSB_TAG_INVALID_BIT, %g1 + prefetch [%o0 + 0x080], #n_writes +1: prefetch [%o0 + 0x0c0], #n_writes + stx %g1, [%o0 + 0x00] + stx %g1, [%o0 + 0x10] + stx %g1, [%o0 + 0x20] + stx %g1, [%o0 + 0x30] + prefetch [%o0 + 0x100], #n_writes + stx %g1, [%o0 + 0x40] + stx %g1, [%o0 + 0x50] + stx %g1, [%o0 + 0x60] + stx %g1, [%o0 + 0x70] + prefetch [%o0 + 0x140], #n_writes + stx %g1, [%o0 + 0x80] + stx %g1, [%o0 + 0x90] + stx %g1, [%o0 + 0xa0] + stx %g1, [%o0 + 0xb0] + prefetch [%o0 + 0x180], #n_writes + stx %g1, [%o0 + 0xc0] + stx %g1, [%o0 + 0xd0] + stx %g1, [%o0 + 0xe0] + stx %g1, [%o0 + 0xf0] + subcc %o1, 0x100, %o1 + bne,pt %xcc, 1b + add %o0, 0x100, %o0 + retl + nop + nop + nop + .size tsb_init, .-tsb_init + + .globl NGtsb_init + .type NGtsb_init,#function +NGtsb_init: + rd %asi, %g2 + mov 1, %g1 + wr %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi + sllx %g1, TSB_TAG_INVALID_BIT, %g1 +1: stxa %g1, [%o0 + 0x00] %asi + stxa %g1, [%o0 + 0x10] %asi + stxa %g1, [%o0 + 0x20] %asi + stxa %g1, [%o0 + 0x30] %asi + stxa %g1, [%o0 + 0x40] %asi + stxa %g1, [%o0 + 0x50] %asi + stxa %g1, [%o0 + 0x60] %asi + stxa %g1, [%o0 + 0x70] %asi + stxa %g1, [%o0 + 0x80] %asi + stxa %g1, [%o0 + 0x90] %asi + stxa %g1, [%o0 + 0xa0] %asi + stxa %g1, [%o0 + 0xb0] %asi + stxa %g1, [%o0 + 0xc0] %asi + stxa %g1, [%o0 + 0xd0] %asi + stxa %g1, [%o0 + 0xe0] %asi + stxa %g1, [%o0 + 0xf0] %asi + subcc %o1, 0x100, %o1 + bne,pt %xcc, 1b + add %o0, 0x100, %o0 + retl + wr %g2, 0x0, %asi + .size NGtsb_init, .-NGtsb_init diff --git a/arch/sparc64/lib/NGbzero.S b/arch/sparc64/lib/NGbzero.S index fef584f745d..e86baece5cc 100644 --- a/arch/sparc64/lib/NGbzero.S +++ b/arch/sparc64/lib/NGbzero.S @@ -157,6 +157,7 @@ niagara_patch_bzero: NG_DO_PATCH(memset, NGmemset) NG_DO_PATCH(__bzero, NGbzero) NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) retl nop .size niagara_patch_bzero,.-niagara_patch_bzero diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 1af797a0a09..b2064e2a44d 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -313,7 +313,7 @@ retry_tsb_alloc: } /* Mark all tags as invalid. */ - memset(new_tsb, 0x40, new_size); + tsb_init(new_tsb, new_size); /* Ok, we are about to commit the changes. If we are * growing an existing TSB the locking is very tricky, -- cgit v1.2.3 From 467418f3508b426adbc7df795ebf3baaed4fbefc Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sun, 19 Mar 2006 12:46:55 -0800 Subject: [SPARC64]: CONFIG_BLK_DEV_RAM fix init/do_mounts_rd.c depends upon CONFIG_BLK_DEV_RAM, not CONFIG_BLK_DEV_INITRD. Signed-off-by: Andrew Morton Signed-off-by: David S. 
Miller --- arch/sparc64/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index 7ae4027a919..7d0e67c1ce5 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -349,7 +349,7 @@ void __init setup_arch(char **cmdline_p) if (!root_flags) root_mountflags &= ~MS_RDONLY; ROOT_DEV = old_decode_dev(root_dev); -#ifdef CONFIG_BLK_DEV_INITRD +#ifdef CONFIG_BLK_DEV_RAM rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK; rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0); rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0); -- cgit v1.2.3 From f6b83f070e9b7ad9075f7cc5646260e56c7d0219 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Mar 2006 01:17:17 -0800 Subject: [SPARC64]: Fix 2 bugs in huge page support. 1) huge_pte_offset() did not check the page table hierarchy elements as being empty correctly, resulting in an OOPS 2) Need platform specific hugetlb_get_unmapped_area() to handle the top-down vs. bottom-up address space allocation strategies. Signed-off-by: David S. Miller --- arch/sparc64/mm/hugetlbpage.c | 179 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 175 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 625cbb336a2..a7a24869d04 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -1,7 +1,7 @@ /* * SPARC64 Huge TLB page support. * - * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) + * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net) */ #include @@ -22,6 +22,175 @@ #include #include +/* Slightly simplified from the non-hugepage variant because by + * definition we don't have to worry about any page coloring stuff + */ +#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) +#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) + +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct * vma; + unsigned long task_size = TASK_SIZE; + unsigned long start_addr; + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; + if (unlikely(len >= VA_EXCLUDE_START)) + return -ENOMEM; + + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + } + + task_size -= len; + +full_search: + addr = ALIGN(addr, HPAGE_SIZE); + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
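Each pass either returns a hole of at least len bytes below vma->vm_start or advances past vma, hopping over the guarded hole in the middle of the 64-bit address space.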
*/ + if (addr < VA_EXCLUDE_START && + (addr + len) >= VA_EXCLUDE_START) { + addr = VA_EXCLUDE_END; + vma = find_vma(mm, VA_EXCLUDE_END); + } + if (unlikely(task_size < addr)) { + if (start_addr != TASK_UNMAPPED_BASE) { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (likely(!vma || addr + len <= vma->vm_start)) { + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; + } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + addr = ALIGN(vma->vm_end, HPAGE_SIZE); + } +} + +static unsigned long +hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, + const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + + /* This should only ever run for 32-bit processes. */ + BUG_ON(!test_thread_flag(TIF_32BIT)); + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache & HPAGE_MASK; + + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + } + + if (unlikely(mm->mmap_base < len)) + goto bottomup; + + addr = (mm->mmap_base-len) & HPAGE_MASK; + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (likely(!vma || addr+len <= vma->vm_start)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = (vma->vm_start-len) & HPAGE_MASK; + } while (likely(len < vma->vm_start)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; + + if (len & ~HPAGE_MASK) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) pmd_t *pmd; pte_t *pte = NULL; + addr &= HPAGE_MASK; + pgd = pgd_offset(mm, addr); - if (pgd) { + if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); - if (pud) { + if (!pud_none(*pud)) { pmd = pmd_offset(pud, addr); - if (pmd) + if (!pmd_none(*pmd)) pte = pte_offset_map(pmd, addr); } } -- cgit v1.2.3 From ac0eb3eb7e54b700386068be025a43d2a3958ee5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Mar 2006 01:23:43 -0800 Subject: [SPARC64]: Update defconfig. Signed-off-by: David S. Miller --- arch/sparc64/defconfig | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig index 069d49777b2..f819a9663a8 100644 --- a/arch/sparc64/defconfig +++ b/arch/sparc64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.16-rc2 -# Tue Feb 7 17:47:18 2006 +# Linux kernel version: 2.6.16 +# Mon Mar 20 01:23:21 2006 # CONFIG_SPARC=y CONFIG_SPARC64=y @@ -115,14 +115,20 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_HUGETLB_PAGE_SIZE_4MB=y # CONFIG_HUGETLB_PAGE_SIZE_512K is not set # CONFIG_HUGETLB_PAGE_SIZE_64K is not set +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +CONFIG_LARGE_ALLOCS=y CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y +# CONFIG_FLATMEM_MANUAL is not set # CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_HAVE_MEMORY_PRESENT=y # CONFIG_SPARSEMEM_STATIC is not set +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_MEMORY_HOTPLUG=y CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MIGRATION=y CONFIG_GENERIC_ISA_DMA=y CONFIG_SBUS=y CONFIG_SBUSCHAR=y @@ -655,6 +661,7 @@ CONFIG_SERIAL_SUNCORE=y CONFIG_SERIAL_SUNSU=y CONFIG_SERIAL_SUNSU_CONSOLE=y CONFIG_SERIAL_SUNSAB=m +CONFIG_SERIAL_SUNHV=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set @@ -1116,11 +1123,7 @@ CONFIG_USB_HIDDEV=y # CONFIG_INFINIBAND is not set # -# SN Devices -# - -# -# EDAC - error detection and reporting (RAS) +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) # # -- cgit v1.2.3
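The TSB sizing policy that the commits above converge on is compact enough to show in one place. What follows is a stand-alone, user-space sketch, not kernel code: the 16-byte entry size, the 8KB-to-1MB bounds and the 3/4-capacity grow trigger are taken from tsb_grow() as shown above, while the function name and the demo loop are invented for illustration.

    #include <stdio.h>

    #define TSB_ENTRY_SIZE  16UL                    /* sizeof(struct tsb): tag + pte */
    #define MIN_TSB_BYTES   8192UL                  /* one base page */
    #define MAX_TSB_BYTES   (1024UL * 1024UL)       /* 1MB cap from tsb_grow() */

    /* Pick the smallest power-of-two TSB whose grow trigger (3/4 of
     * capacity) has not yet been crossed by the current RSS; this
     * mirrors the sizing loop at the top of tsb_grow(). */
    static unsigned long tsb_size_for_rss(unsigned long rss_pages)
    {
            unsigned long size;

            for (size = MIN_TSB_BYTES; size < MAX_TSB_BYTES; size <<= 1) {
                    unsigned long n_entries = size / TSB_ENTRY_SIZE;

                    if ((n_entries * 3) / 4 > rss_pages)
                            break;
            }
            return size;
    }

    int main(void)
    {
            unsigned long rss;

            /* Walk a few RSS values to show the growth curve. */
            for (rss = 256; rss <= 65536; rss *= 4)
                    printf("rss=%6lu pages -> %4lu KB TSB\n",
                           rss, tsb_size_for_rss(rss) / 1024);
            return 0;
    }

With 16-byte entries, an 8KB TSB holds 512 translations and is grown once RSS passes 384 pages; the table tops out at 1MB (65536 entries), at which point tsb_rss_limit is pinned to ~0UL and growing stops, matching the behavior described in the tsb_grow() comments above.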