Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/kprobes.c    |  83
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  | 221
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c |   2
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c  |   2
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c |   2
-rw-r--r--  arch/sparc64/kernel/sbus.c       |  41
-rw-r--r--  arch/sparc64/kernel/setup.c      |  11
-rw-r--r--  arch/sparc64/kernel/signal32.c   |   2
-rw-r--r--  arch/sparc64/kernel/smp.c        |   3
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c  |   8
-rw-r--r--  arch/sparc64/kernel/traps.c      |  19
11 files changed, 257 insertions, 137 deletions
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 7066d7ba667..bdac631cf01 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -6,7 +6,6 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
-
 #include <asm/kdebug.h>
 #include <asm/signal.h>
 
@@ -47,25 +46,59 @@ void arch_copy_kprobe(struct kprobe *p)
 {
 	p->ainsn.insn[0] = *p->addr;
 	p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
+	p->opcode = *p->addr;
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
+	*p->addr = BREAKPOINT_INSTRUCTION;
+	flushi(p->addr);
 }
 
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE	0x00000001
-#define KPROBE_HIT_SS		0x00000002
+void arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+	flushi(p->addr);
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
 
 static struct kprobe *current_kprobe;
 static unsigned long current_kprobe_orig_tnpc;
 static unsigned long current_kprobe_orig_tstate_pil;
 static unsigned int kprobe_status;
+static struct kprobe *kprobe_prev;
+static unsigned long kprobe_orig_tnpc_prev;
+static unsigned long kprobe_orig_tstate_pil_prev;
+static unsigned int kprobe_status_prev;
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static inline void save_previous_kprobe(void)
+{
+	kprobe_status_prev = kprobe_status;
+	kprobe_orig_tnpc_prev = current_kprobe_orig_tnpc;
+	kprobe_orig_tstate_pil_prev = current_kprobe_orig_tstate_pil;
+	kprobe_prev = current_kprobe;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+	kprobe_status = kprobe_status_prev;
+	current_kprobe_orig_tnpc = kprobe_orig_tnpc_prev;
+	current_kprobe_orig_tstate_pil = kprobe_orig_tstate_pil_prev;
+	current_kprobe = kprobe_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	current_kprobe_orig_tnpc = regs->tnpc;
 	current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
+	current_kprobe = p;
+}
+
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
 	regs->tstate |= TSTATE_PIL;
 
 	/*single step inline, if it a breakpoint instruction*/
@@ -78,17 +111,6 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	}
 }
 
-static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
-{
-	*p->addr = p->opcode;
-	flushi(p->addr);
-
-	regs->tpc = (unsigned long) p->addr;
-	regs->tnpc = current_kprobe_orig_tnpc;
-	regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
-			current_kprobe_orig_tstate_pil);
-}
-
 static int kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
@@ -109,8 +131,18 @@ static int kprobe_handler(struct pt_regs *regs)
 				unlock_kprobes();
 				goto no_kprobe;
 			}
-			disarm_kprobe(p, regs);
-			ret = 1;
+			/* We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * We here save the original kprobes variables and
+			 * just single step on the instruction of the new probe
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe();
+			set_current_kprobe(p, regs);
+			p->nmissed++;
+			kprobe_status = KPROBE_REENTER;
+			prepare_singlestep(p, regs);
+			return 1;
 		} else {
 			p = current_kprobe;
 			if (p->break_handler && p->break_handler(p, regs))
@@ -138,8 +170,8 @@ static int kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
+	set_current_kprobe(p, regs);
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
 	if (p->pre_handler && p->pre_handler(p, regs))
 		return 1;
 
@@ -245,12 +277,20 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 	if (!kprobe_running())
 		return 0;
 
-	if (current_kprobe->post_handler)
+	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+		kprobe_status = KPROBE_HIT_SSDONE;
 		current_kprobe->post_handler(current_kprobe, regs, 0);
+	}
 
 	resume_execution(current_kprobe, regs);
 
+	/*Restore back the original saved kprobes variables and continue. */
+	if (kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe();
+		goto out;
+	}
	unlock_kprobes();
+out:
 	preempt_enable_no_resched();
 
 	return 1;
@@ -392,3 +432,4 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	}
 	return 0;
 }
+
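The kprobes.c change above replaces the old disarm-on-reentry path with a one-deep save/restore of the active probe state: a probe hit while another probe is being handled is single-stepped with no user handlers run, and its nmissed count is bumped. A minimal userspace sketch of that one-level save/restore pattern (hypothetical names, not kernel code):

    #include <stdio.h>

    /* Hypothetical stand-ins for the per-CPU kprobe state above. */
    enum { INACTIVE, HIT_ACTIVE, REENTER };

    static int cur_probe, cur_status;   /* active probe state */
    static int prev_probe, prev_status; /* one-deep save slot */

    static void save_previous(void)
    {
        prev_probe = cur_probe;
        prev_status = cur_status;
    }

    static void restore_previous(void)
    {
        cur_probe = prev_probe;
        cur_status = prev_status;
    }

    static void probe_hit(int probe)
    {
        if (cur_status != INACTIVE) {
            /* Reentered while handling another probe: stash the outer
             * probe and single-step this one without user handlers. */
            save_previous();
            cur_probe = probe;
            cur_status = REENTER;
            return;
        }
        cur_probe = probe;
        cur_status = HIT_ACTIVE;
        printf("pre_handler for probe %d\n", probe);
    }

    static void single_step_done(void)
    {
        if (cur_status == REENTER) { /* inner probe done: pop saved state */
            restore_previous();
            return;
        }
        printf("post_handler for probe %d\n", cur_probe);
        cur_status = INACTIVE;
    }

    int main(void)
    {
        probe_hit(1);       /* outer probe fires */
        probe_hit(2);       /* nested hit inside the handler */
        single_step_done(); /* finishes probe 2, restores probe 1 */
        single_step_done(); /* finishes probe 1 */
        return 0;
    }

A single save slot suffices here because the patch only handles one level of reentry (KPROBE_REENTER); deeper nesting is not expected while single-stepping with interrupts masked.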
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae..2803bc7c2c7 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -195,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -235,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -316,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -359,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -379,6 +410,70 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+{
+	int limit;
+
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+		u64 val;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		pci_iommu_write(flushreg, ctx);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
+		}
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
+	} else {
+		unsigned long i;
+
+	do_page_flush:
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +481,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +509,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -444,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -583,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -647,29 +723,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -677,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -715,28 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +785,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +809,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
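The pci_iommu.c changes above retire the monotonically increasing iommu_cur_ctx counter in favor of a bitmap allocator (iommu_alloc_ctx/iommu_free_ctx): hardware contexts are a finite resource that must be recycled, context 0 doubles as the "no context" fallback, and ctx_lowest_free is a hint that is only lowered on free so allocation scans can start low. A simplified, self-contained model of the same idea (hypothetical NUM_CTXS, a plain byte array standing in for the kernel's bitmap; not the kernel code):

    #include <stdio.h>

    #define NUM_CTXS 16 /* hypothetical; the hardware has IOMMU_NUM_CTXS */

    static unsigned char ctx_used[NUM_CTXS];
    static int ctx_lowest_free = 1; /* ctx 0 is reserved as "no context" */

    static int ctx_alloc(void)
    {
        int n;

        /* Scan upward from the lowest slot that might be free; the hint
         * is only ever lowered (on free), so nothing below it is free. */
        for (n = ctx_lowest_free; n < NUM_CTXS; n++)
            if (!ctx_used[n])
                break;
        if (n == NUM_CTXS) {
            fprintf(stderr, "ran out of contexts\n");
            return 0; /* fall back to the shared context 0 */
        }
        ctx_used[n] = 1;
        return n;
    }

    static void ctx_free(int ctx)
    {
        if (ctx) {
            ctx_used[ctx] = 0;
            /* Keep the hint pointing at the lowest free slot. */
            if (ctx < ctx_lowest_free)
                ctx_lowest_free = ctx;
        }
    }

    int main(void)
    {
        int a = ctx_alloc(), b = ctx_alloc();
        printf("a=%d b=%d\n", a, b); /* a=1 b=2 */
        ctx_free(a);
        printf("reused=%d\n", ctx_alloc()); /* 1 again */
        return 0;
    }

This is why the controller init code below switches from iommu_cur_ctx = 0 to ctx_lowest_free = 1: slot 0 is never handed out as a dedicated context.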
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1..534320ef0db 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af..53d333b4a4e 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc372..5753175b94e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control  = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14d9c3a21b9..89f5e019f24 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,19 +117,42 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
-	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	unsigned long n;
+	int limit;
+
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 100000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
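The sbus.c rewrite mirrors the pci_strbuf_flush() changes above: skip the flush-flag handshake entirely for SBUS_DMA_TODEVICE transfers (the device cannot have dirtied the streaming cache), and poll the flush flag a bounded number of times instead of spinning forever on wedged hardware. A hedged userspace sketch of the bounded-poll idiom (usleep standing in for udelay, a volatile variable for the DMA-written flag; not the kernel code):

    #include <stdio.h>
    #include <unistd.h>

    static volatile unsigned long flushflag; /* hardware writes this via DMA */

    /* Bounded poll: report a timeout instead of hanging the CPU forever. */
    static int wait_for_flushflag(void)
    {
        int limit = 100000; /* same budget as the patch: 100k 1us waits */

        while (flushflag == 0UL) {
            if (!--limit) {
                fprintf(stderr, "strbuf flush: flushflag timeout\n");
                return -1;
            }
            usleep(1); /* the kernel uses udelay(1) plus membar("#LoadLoad") */
        }
        return 0;
    }

    int main(void)
    {
        flushflag = 1; /* pretend the flush completed immediately */
        printf("%d\n", wait_for_flushflag());
        return 0;
    }

Breaking out with a warning keeps a flaky streaming buffer from turning a DMA unmap into a silent hard hang; the warning also records the address and page count for diagnosis.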
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 12c3d84b746..b7e6a91952b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -383,6 +383,17 @@ static void __init process_switch(char c)
 		/* Use PROM debug console. */
 		register_console(&prom_debug_console);
 		break;
+	case 'P':
+		/* Force UltraSPARC-III P-Cache on. */
+		if (tlb_type != cheetah) {
+			printk("BOOT: Ignoring P-Cache force option.\n");
+			break;
+		}
+		cheetah_pcache_forced_on = 1;
+		add_taint(TAINT_MACHINE_CHECK);
+		cheetah_enable_pcache();
+		break;
+
 	default:
 		printk("Unknown boot switch (-%c)\n", c);
 		break;
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 9a375e975cf..f28428f4170 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -102,7 +102,7 @@ typedef struct compat_siginfo{
 
 		/* POSIX.1b timers */
 		struct {
-			timer_t _tid;			/* timer id */
+			compat_timer_t _tid;		/* timer id */
 			int _overrun;			/* overrun count */
 			compat_sigval_t _sigval;	/* same as below */
 			int _sys_private;	/* not to be passed to user */
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 6dff06a44e7..e5b9c7a2778 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,6 +123,9 @@ void __init smp_callin(void)
 
 	smp_setup_percpu_timer();
 
+	if (cheetah_pcache_forced_on)
+		cheetah_enable_pcache();
+
 	local_irq_enable();
 
 	calibrate_delay();
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 0077f02f4b3..5f8c822a2b4 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -84,6 +84,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 			return addr;
 	}
 
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = TASK_UNMAPPED_BASE;
+	}
 	start_addr = addr = mm->free_area_cache;
 
 	task_size -= len;
@@ -103,6 +107,7 @@ full_search:
 		if (task_size < addr) {
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				start_addr = addr = TASK_UNMAPPED_BASE;
+				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -114,6 +119,9 @@ full_search:
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
 		addr = vma->vm_end;
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 56b203a2af6..a9f4596d7c2 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -421,6 +421,25 @@ asmlinkage void cee_log(unsigned long ce_status,
 	}
 }
 
+int cheetah_pcache_forced_on;
+
+void cheetah_enable_pcache(void)
+{
+	unsigned long dcr;
+
+	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
+	       smp_processor_id());
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			     : "=r" (dcr)
+			     : "i" (ASI_DCU_CONTROL_REG));
+	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
+}
+
 /* Cheetah error trap handling. */
 static unsigned long ecache_flush_physbase;
 static unsigned long ecache_flush_linesize;
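Finally, the sys_sparc.c hunks adopt the free_area_cache/cached_hole_size heuristic for arch_get_unmapped_area(): remember where the last search ended, record the largest hole skipped along the way, and restart from TASK_UNMAPPED_BASE only when a request is small enough to fit in a hole that was previously passed over. A simplified model of that first-fit walk (hypothetical userspace C over a fixed, sorted mapping list; unlike the kernel, allocations are not inserted back into the list):

    #include <stdio.h>

    struct vma { unsigned long start, end; };

    /* A fictitious, sorted address-space layout. */
    static const struct vma vmas[] =
        { { 0x1000, 0x3000 }, { 0x4000, 0x9000 }, { 0xb000, 0xc000 } };
    static const int nvmas = 3;

    static unsigned long free_area_cache = 0x1000; /* where the last search ended */
    static unsigned long cached_hole_size;         /* largest hole skipped below it */

    static unsigned long get_unmapped_area(unsigned long len)
    {
        unsigned long addr;
        int i;

        /* A big-enough hole was skipped earlier: restart from the bottom. */
        if (len <= cached_hole_size) {
            cached_hole_size = 0;
            free_area_cache = 0x1000;
        }
        addr = free_area_cache;

        for (i = 0; i < nvmas; i++) {
            if (vmas[i].end <= addr)
                continue; /* mapping lies below the cursor */
            if (addr + len <= vmas[i].start) { /* request fits in this hole */
                free_area_cache = addr + len;
                return addr;
            }
            /* Remember the largest hole we stepped over. */
            if (addr + cached_hole_size < vmas[i].start)
                cached_hole_size = vmas[i].start - addr;
            addr = vmas[i].end;
        }
        free_area_cache = addr + len;
        return addr; /* open space above the last mapping */
    }

    int main(void)
    {
        /* 0xc000: skips (and records) the 0x2000-byte hole at 0x9000. */
        printf("0x%lx\n", get_unmapped_area(0x4000));
        /* 0x3000: the small request restarts from the bottom and lands
         * in a hole the first search stepped over. */
        printf("0x%lx\n", get_unmapped_area(0x1000));
        return 0;
    }

The cache avoids rescanning the whole address space for every mapping, while the recorded hole size keeps small requests from permanently piling up at the top after one large allocation.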