From 4187377b2411d43ea4470b35162917a5093857bf Mon Sep 17 00:00:00 2001
From: Eric Dujardin
Date: Sat, 23 Feb 2008 22:51:28 -0700
Subject: [POWERPC] Add export for mpc52xx_set_psc_clkdiv

mpc52xx_set_psc_clkdiv is needed by PSC device drivers.

Signed-off-by: Eric Dujardin
Signed-off-by: Grant Likely
---
 arch/powerpc/platforms/52xx/mpc52xx_common.c | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 9aa4425d80b..4d5fd1dbd40 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
 	return 0;
 }
+EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);

 /**
  * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
--
cgit v1.2.3

From d58831375d68a3bd39d5ebab9eca711fbb4ee108 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Tue, 26 Feb 2008 13:31:42 +1100
Subject: [POWERPC] spufs: fix context destruction during psmap fault

We have a small window where a spu context may be destroyed while
we're servicing a page fault (from another thread) to the context's
problem state mapping. After we up_read() the mmap_sem, it's possible
that the context is destroyed by its owning thread, and so the later
references to ctx are invalid. This can manifest as a deadlock on the
(now free()-ed) context state mutex.

This change adds a reference to the context before we release the
mmap_sem, so that the context cannot be destroyed.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/file.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c66c3756970..f7a7e8635fb 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -366,6 +366,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return NOPFN_SIGBUS;

+	/*
+	 * Because we release the mmap_sem, the context may be destroyed while
+	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
+	 * in the meantime.
+	 */
+	get_spu_context(ctx);
+
 	/*
 	 * We have to wait for context to be loaded before we have
 	 * pages to hand out to the user, but we don't want to wait
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	 * hanged.
 	 */
 	if (spu_acquire(ctx))
-		return NOPFN_REFAULT;
+		goto refault;

 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,

 	if (!ret)
 		spu_release(ctx);
+
+refault:
+	put_spu_context(ctx);
 	return NOPFN_REFAULT;
 }
--
cgit v1.2.3

From 0111a701867a796a7ca6ecbc385e4befc9f35066 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Wed, 27 Feb 2008 19:08:13 +1100
Subject: [POWERPC] spufs: fix invalid scheduling of forgotten contexts

At present, we have a situation where a context with no owner is
re-scheduled by spu_forget:

	Thread 1: reading regs file		Thread 2: context owner

						spu_forget()
						 - ctx->owner = NULL
						 - set SPU_SCHED_WAS_ACTIVE

	spu_acquire_saved()
	 - context is in saved state

	spu_release_saved()
	 - SPU_SCHED_WAS_ACTIVE is set, so
	   spu_activate() the context,
	   which now has no owner

In spu_forget(), we shouldn't be requesting a re-schedule by setting
SPU_SCHED_WAS_ACTIVE.

This change removes the set_bit in spu_forget(), so that
spu_release_saved() doesn't reinsert this destroyed context on to the
run queue.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/context.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 133995ed5cc..cf6c2c89211 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)

 	/*
 	 * This is basically an open-coded spu_acquire_saved, except that
-	 * we don't acquire the state mutex interruptible.
+	 * we don't acquire the state mutex interruptible, and we don't
+	 * want this context to be rescheduled on release.
 	 */
 	mutex_lock(&ctx->state_mutex);
-	if (ctx->state != SPU_STATE_SAVED) {
-		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-	}

 	mm = ctx->owner;
 	ctx->owner = NULL;
--
cgit v1.2.3

From 71791bee90dd29b292c7e55c1c00857578c912bd Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Mon, 25 Feb 2008 14:58:37 +1100
Subject: [POWERPC] spufs: fix order of sputrace thread IDs

Currently, we get the following output from sputrace:

  [5.097935954] 1606: spufs_ps_nopfn__enter (thread = 1605, spu = -1)
  [5.097958164] 1606: spufs_ps_nopfn__insert (thread = 1605, spu = 15)
  [5.097973529] 1607: spufs_ps_nopfn__enter (thread = 1605, spu = -1)
  [5.097989174] 1607: spufs_ps_nopfn__insert (thread = 1605, spu = 14)

Which leads me to believe that 160[67] is the current thread ID, and
1605 is the context backing the psmap. However, the 'current' and
'owner' tids are reversed - the 'current' tid is on the right.

This change puts the current thread ID in the left-hand column instead,
and renames the right to 'ctxthread'.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/sputrace.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 01974f7776e..79aa773f3c9 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
 		ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));

 	return snprintf(tbuf, n,
-		"[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+		"[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
 		(unsigned long) tv.tv_sec,
 		(unsigned long) tv.tv_nsec,
-		t->owner_tid,
-		t->name,
 		t->curr_tid,
+		t->name,
+		t->owner_tid,
 		t->number);
 }

@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
 	{ "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
 	{ "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
 	{ "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+	{ "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };

 static int __init sputrace_init(void)
--
cgit v1.2.3

From fae9ca791507876c3ccaa8ab686b2ce42dc7a560 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 29 Feb 2008 15:16:48 +1100
Subject: [POWERPC] spufs: synchronize IRQ when disabling

There is a small race between the context save procedure and the SPU
interrupt handling, where we expect all interrupt processing to have
finished after disabling them, while an interrupt is still being
processed on another CPU.

The obvious fix is to call synchronize_irq() after disabling the
interrupts at the start of the context save procedure to make sure we
never access the SPU any more during an ongoing save or even after
that. Thanks to Benjamin Herrenschmidt for pointing this out.

Acked-by: Benjamin Herrenschmidt
Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/switch.c | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1f..e9dc7a55d1b 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@

 #include
 #include
+#include
 #include
 #include
 #include
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 * Write INT_MASK_class1 with value of 0.
 	 * Save INT_Mask_class2 in CSA.
 	 * Write INT_MASK_class2 with value of 0.
+	 * Synchronize all three interrupts to be sure
+	 * we no longer execute a handler on another CPU.
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }

 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
--
cgit v1.2.3

From cc4b7c1814c9ad375e8167ea4a9ec4a0ec1ada04 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 26 Feb 2008 07:01:56 +0100
Subject: [POWERPC] spufs: invalidate SLB translation before adding a new entry

When we replace an SLB entry in the MFC after using up all the
available entries, there is a short window in which an incorrect entry
is marked as valid.

The problem is that the 'valid' bit is stored in the ESID, which is
always written after the VSID. Overwriting the VSID first will make the
original ESID entry point to the new VSID, which means that any
concurrent DMA accessing the old ESID ends up being redirected to the
new virtual address. A few cycles later, we write the new ESID and
everything is fine again.

That race can be closed by writing a zero entry to the ESID first,
which makes sure that the VSID is not accessed until we write the new
ESID. Note that we don't actually need to invalidate the SLB entry
using the invalidation register, which would also flush any ERAT
entries for that segment, because the segment translation does not
become invalid but is only removed from the SLB cache.

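The ordering requirement can be shown in isolation with a minimal
sketch (hypothetical accessors, not the actual priv2 register
interface; the real code below uses out_be64(), whose MMIO accesses
are ordered, whereas plain stores would need explicit barriers):

	/*
	 * Sketch: update a two-word translation entry whose valid bit
	 * lives in the ESID word. Hardware may look the entry up at
	 * any time, so a valid ESID must never point at a half-updated
	 * VSID.
	 */
	static void update_entry(u64 *esid, u64 *vsid,
				 u64 new_esid, u64 new_vsid)
	{
		*esid = 0;		/* invalidate: lookups now ignore the entry */
		*vsid = new_vsid;	/* safe, no valid ESID references this VSID */
		*esid = new_esid;	/* publish: entry becomes valid again */
	}
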
Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 87eb07f94c5..cfc28e93c82 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -148,7 +148,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 			__func__, slbe, slb->vsid, slb->esid);

 	out_be64(&priv2->slb_index_W, slbe);
+	/* set invalid before writing vsid */
+	out_be64(&priv2->slb_esid_RW, 0);
+	/* now it's safe to write the vsid */
 	out_be64(&priv2->slb_vsid_RW, slb->vsid);
+	/* setting the new esid makes the entry valid again */
 	out_be64(&priv2->slb_esid_RW, slb->esid);
 }
--
cgit v1.2.3

From c92a1acb675058375cc508ad024c33358b42d766 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 28 Feb 2008 06:06:30 +0100
Subject: [POWERPC] spufs: serialize SLB invalidation against SLB loading

There is a potential race between flushes of the entire SLB in the MFC
and the point where new entries are being established. The problem is
that we might put an ESID entry into the MFC SLB when the VSID entry
has just been cleared by the global flush.

This can be circumvented by holding the register_lock throughout both
the flushing and the creation of SLB entries.

Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index cfc28e93c82..712001f6b7d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	unsigned long flags;

+	spin_lock_irqsave(&spu->register_lock, flags);
 	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+	spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

@@ -294,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		nr_slbs++;
 	}

+	spin_lock_irq(&spu->register_lock);
 	/* Add the set of SLBs */
 	for (i = 0; i < nr_slbs; i++)
 		spu_load_slb(spu, i, &slbs[i]);
+	spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

@@ -341,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
-	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-			dar, dsisr);

 	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);

+	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
--
cgit v1.2.3

From 2a58aa33daef37134c8a43dca0b7578c3fa7f993 Mon Sep 17 00:00:00 2001
From: Andre Detsch
Date: Mon, 25 Feb 2008 15:07:42 -0300
Subject: [POWERPC] spufs: fix use time accounting on SPE-overcommit

The spu_runcntl_RW register is restored within the spu_restore
function. So, at the end of spu_bind_context, the SPU context is not
just loaded, but running.

This change corrects the state switch to account the time as USER.

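The state constant matters because the utilization accounting charges
elapsed time to the state being left on every switch; a context loaded
under SPU_UTIL_IDLE_LOADED therefore has its actual run time counted
as idle until the next switch. A simplified sketch of that pattern
(hypothetical names, not the actual spufs implementation):

	struct util_stats {
		u64 time[SPU_UTIL_MAX];	/* accumulated time per state */
		int cur_state;
		u64 tstamp;		/* time of the last state switch */
	};

	static void switch_state(struct util_stats *s, int new_state, u64 now)
	{
		s->time[s->cur_state] += now - s->tstamp; /* charge old state */
		s->cur_state = new_state;
		s->tstamp = now;
	}
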
Signed-off-by: Andre Detsch
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3a5972117de..5d5f680cd0b 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;

-	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	spuctx_switch_state(ctx, SPU_UTIL_USER);
 }

 /*
--
cgit v1.2.3

From 9176c0b1f5a9099cebc07458042ae6a7c75af7b2 Mon Sep 17 00:00:00 2001
From: Jens Osterkamp
Date: Thu, 28 Feb 2008 11:26:21 +0100
Subject: [POWERPC] move celleb DABRX definitions

This moves the private DABRX definitions for celleb from beat.h to
reg.h to make them usable for all.

Signed-off-by: Jens Osterkamp
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/celleb/beat.h | 3 ---
 1 file changed, 3 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/celleb/beat.h b/arch/powerpc/platforms/celleb/beat.h
index b2e292df13c..ac82ac35b99 100644
--- a/arch/powerpc/platforms/celleb/beat.h
+++ b/arch/powerpc/platforms/celleb/beat.h
@@ -21,9 +21,6 @@
 #ifndef _CELLEB_BEAT_H
 #define _CELLEB_BEAT_H

-#define DABRX_KERNEL (1UL<<1)
-#define DABRX_USER (1UL<<0)
-
 int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*);
 int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t);
 int64_t beat_repository_encode(int, const char *, uint64_t[4]);
--
cgit v1.2.3

From f3c1ed9720ec62626bbf3e0c3648568c131978e2 Mon Sep 17 00:00:00 2001
From: Jens Osterkamp
Date: Thu, 28 Feb 2008 11:27:31 +0100
Subject: [POWERPC] enable hardware watchpoints on cell blades

Ulrich Weigand found that the hardware watchpoints on cell were not
working back in November:

http://ozlabs.org/pipermail/linuxppc-dev/2007-November/046135.html

This patch sets them during initialization.

Signed-off-by: Jens Osterkamp
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/setup.c | 7 +++++++
 1 file changed, 7 insertions(+)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index a7f609b3b87..dda34650cb0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void)
 	mpic_init_IRQ();
 }

+static void __init cell_set_dabrx(void)
+{
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
+}
+
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void)

 	cbe_regs_init();

+	cell_set_dabrx();
+
 #ifdef CONFIG_CBE_RAS
 	cbe_ras_init();
 #endif
--
cgit v1.2.3

From f9660e8a6c16e17935777cdee5194842904c2d72 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:22 +1100
Subject: [POWERPC] Clean up cell IOMMU fixed mapping terminology

It's called the fixed mapping, not the static mapping.

Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index edab631a8dc..bbe83899647 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -549,7 +549,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
 	archdata->dma_data = &window->table;
 }

-static void cell_dma_dev_setup_static(struct device *dev);
+static void cell_dma_dev_setup_fixed(struct device *dev);

 static void cell_dma_dev_setup(struct device *dev)
 {
@@ -557,7 +557,7 @@ static void cell_dma_dev_setup(struct device *dev)

 	/* Order is important here, these are not mutually exclusive */
 	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-		cell_dma_dev_setup_static(dev);
+		cell_dma_dev_setup_fixed(dev);
 	else if (get_pci_dma_ops() == &dma_iommu_ops)
 		cell_dma_dev_setup_iommu(dev);
 	else if (get_pci_dma_ops() == &dma_direct_ops)
@@ -858,7 +858,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 	return 0;
 }

-static void cell_dma_dev_setup_static(struct device *dev)
+static void cell_dma_dev_setup_fixed(struct device *dev)
 {
 	struct dev_archdata *archdata = &dev->archdata;
 	u64 addr;
@@ -894,7 +894,7 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
 		/* Don't touch the dynamic region */
 		if (i >= dbase && i < (dbase + dsize)) {
-			pr_debug("iommu: static/dynamic overlap, skipping\n");
+			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
 		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
--
cgit v1.2.3

From 0d7386ebffd8506b28c37a7d5541132a576f64e2 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:23 +1100
Subject: [POWERPC] Use it_offset not pte_offset in cell IOMMU code

The cell IOMMU tce build and free routines use pte_offset to convert
the index passed from the generic IOMMU code into a page table offset.
This takes into account the SPIDER_DMA_OFFSET which sets the top bit of
every DMA address.

However it doesn't cater for the IOMMU window starting at a non-zero
address, as the base of the window is not incorporated into pte_offset
at all.

As it turns out, tbl->it_offset already contains the value we need: it
takes into account both the base of the window and pte_offset. So use
it instead!

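As a sketch of the resulting arithmetic (the it_offset initialisation
shown here is from the window setup code, visible in a later patch in
this series):

	/* it_offset folds in both the window base and pte_offset ... */
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;

	/* ... so the PTE slot for an index from the generic code is: */
	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
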
Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index bbe83899647..4e75919bf6b 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -200,7 +200,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
 		(window->ioid & IOPTE_IOID_Mask);
 #endif

-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

 	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
 		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
@@ -232,7 +232,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
 		| (window->ioid & IOPTE_IOID_Mask);
 #endif

-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

 	for (i = 0; i < npages; i++)
 		io_pte[i] = pte;
--
cgit v1.2.3

From 08e024272e529076663e5b4dc8eeecd4131f8a48 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:23 +1100
Subject: [POWERPC] Remove unused pte_offset variable

The cell IOMMU code no longer needs to save the pte_offset variable
separately; it is incorporated into tbl->it_offset.

Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 4e75919bf6b..555d264ad56 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -123,7 +123,6 @@ struct iommu_window {
 	struct cbe_iommu *iommu;
 	unsigned long offset;
 	unsigned long size;
-	unsigned long pte_offset;
 	unsigned int ioid;
 	struct iommu_table table;
 };
@@ -475,13 +474,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	window->size = size;
 	window->ioid = ioid;
 	window->iommu = iommu;
-	window->pte_offset = pte_offset;

 	window->table.it_blocksize = 16;
 	window->table.it_base = (unsigned long)iommu->ptab;
 	window->table.it_index = iommu->nid;
-	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
-		window->pte_offset;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
 	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

 	iommu_init_table(&window->table, iommu->nid);
--
cgit v1.2.3

From edf441fb80f9d7a962c298e8da94c8c64802fffa Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:24 +1100
Subject: [POWERPC] Move allocation of cell IOMMU pad page

There's no need to allocate the pad page unless we're going to actually
use it - so move the allocation to where we know we're going to use it.

Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 555d264ad56..8e57e1af378 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -344,12 +344,6 @@ static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
 	iommu->ptab = page_address(page);
 	memset(iommu->ptab, 0, ptab_size);

-	/* allocate a bogus page for the end of each mapping */
-	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
-	BUG_ON(!page);
-	iommu->pad_page = page_address(page);
-	clear_page(iommu->pad_page);
-
 	/* number of pages needed for a page table */
 	n_pte_pages = (pages_per_segment *
 		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
@@ -463,6 +457,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 		unsigned long pte_offset)
 {
 	struct iommu_window *window;
+	struct page *page;
 	u32 ioid;

 	ioid = cell_iommu_get_ioid(np);
@@ -501,6 +496,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	 * This code also assumes that we have a window that starts at 0,
 	 * which is the case on all spider based blades.
 	 */
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+	BUG_ON(!page);
+	iommu->pad_page = page_address(page);
+	clear_page(iommu->pad_page);
+
 	__set_bit(0, window->table.it_map);
 	tce_build_cell(&window->table, window->table.it_offset, 1,
 		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
--
cgit v1.2.3

From 7d432ff1b7db87e78eb74d42631d2a23ca6f26f2 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:25 +1100
Subject: [POWERPC] Split setup of IOMMU stab and ptab, allocate
 dynamic/fixed ptabs separately

Currently the cell IOMMU code allocates the entire IOMMU page table in
a contiguous chunk. This is nice and tidy, but for machines with larger
amounts of RAM the page table allocation can fail due to it simply
being too large.

So split the segment table and page table setup routine, and arrange to
have the dynamic and fixed page tables allocated separately.

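As a rough illustration of the sizes involved (using the
IO_SEGMENT_SHIFT of 28 and the 4K IOMMU page size visible in this
file): each 256MB segment needs (1 << (28 - 12)) * 8 bytes = 512KB of
PTEs, so covering a dynamic plus fixed region spanning 8 segments (2GB)
takes a single 4MB physically contiguous allocation, an order-10
alloc_pages_node() request that becomes unreliable once memory is
fragmented. Two smaller per-region allocations are far more likely to
succeed.
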
Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 69 +++++++++++++++++++++++--------------
 1 file changed, 43 insertions(+), 26 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 8e57e1af378..187a723eafc 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -306,50 +306,54 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
 	return -ENODEV;
 }

-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 		unsigned long dbase, unsigned long dsize,
 		unsigned long fbase, unsigned long fsize)
 {
 	struct page *page;
-	int i;
-	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-		      n_pte_pages, base;
-
-	base = dbase;
-	if (fsize != 0)
-		base = min(fbase, dbase);
+	unsigned long segments, stab_size;

 	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-	pages_per_segment = 1ull << IO_PAGENO_BITS;

-	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-			__FUNCTION__, iommu->nid, segments, pages_per_segment);
+	pr_debug("%s: iommu[%d]: segments: %lu\n",
+			__FUNCTION__, iommu->nid, segments);

 	/* set up the segment table */
 	stab_size = segments * sizeof(unsigned long);
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
 	BUG_ON(!page);
 	iommu->stab = page_address(page);
-	clear_page(iommu->stab);
+	memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+		unsigned long base, unsigned long size, unsigned long gap_base,
+		unsigned long gap_size)
+{
+	struct page *page;
+	int i;
+	unsigned long reg, segments, pages_per_segment, ptab_size,
+		      n_pte_pages, start_seg, *ptab;
+
+	start_seg = base >> IO_SEGMENT_SHIFT;
+	segments  = size >> IO_SEGMENT_SHIFT;
+	pages_per_segment = 1ull << IO_PAGENO_BITS;

-	/* ... and the page tables. Since these are contiguous, we can treat
-	 * the page tables as one array of ptes, like pSeries does.
-	 */
 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
 			iommu->nid, ptab_size, get_order(ptab_size));
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
 	BUG_ON(!page);

-	iommu->ptab = page_address(page);
-	memset(iommu->ptab, 0, ptab_size);
+	ptab = page_address(page);
+	memset(ptab, 0, ptab_size);

 	/* number of pages needed for a page table */
 	n_pte_pages = (pages_per_segment *
 		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;

 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+			__FUNCTION__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);

 	/* initialise the STEs */
@@ -364,12 +368,21 @@ static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
 		__unknown_page_size_error();
 	}

+	gap_base = gap_base >> IO_SEGMENT_SHIFT;
+	gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
 	pr_debug("Setting up IOMMU stab:\n");
-	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-		iommu->stab[i] = reg |
-			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+	for (i = start_seg; i < (start_seg + segments); i++) {
+		if (i >= gap_base && i < (gap_base + gap_size)) {
+			pr_debug("\toverlap at %d, skipping\n", i);
+			continue;
+		}
+		iommu->stab[i] = reg | (__pa(ptab) + n_pte_pages *
+					IOMMU_PAGE_SIZE * (i - start_seg));
 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
 	}
+
+	return ptab;
 }

 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
@@ -416,7 +429,8 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 	unsigned long base, unsigned long size)
 {
-	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+	cell_iommu_setup_stab(iommu, base, size, 0, 0);
+	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0);
 	cell_iommu_enable_hardware(iommu);
 }

@@ -870,8 +884,10 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	unsigned long base_pte, uaddr, *io_pte;
 	int i;
+	unsigned long base_pte, uaddr, *io_pte, *ptab;
+
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize);

 	dma_iommu_fixed_base = fbase;

@@ -883,7 +899,7 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

-	io_pte = iommu->ptab;
+	io_pte = ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

@@ -894,7 +910,7 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+		io_pte[i - fbase] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
 	}

 	mb();
@@ -992,7 +1008,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
 			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
 			dbase + dsize, fbase, fbase + fsize);

-		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0);
 		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
 			     fbase, fsize);
 		cell_iommu_enable_hardware(iommu);
--
cgit v1.2.3

From 3d3e6da17d6af42a3fd4891fb09d93dca002e590 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:26 +1100
Subject: [POWERPC] Cell IOMMU: n_pte_pages is in 4K
 page units, not IOMMU_PAGE_SIZE

We use n_pte_pages to calculate the stride through the page tables, but
we also use it to set the NPPT value in the segment table entry. That
is defined as the number of 4K pages per segment, so we should
calculate it as such regardless of the IOMMU page size.

Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 187a723eafc..7a861cb960d 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -348,9 +348,8 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 	ptab = page_address(page);
 	memset(ptab, 0, ptab_size);

-	/* number of pages needed for a page table */
-	n_pte_pages = (pages_per_segment *
-		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+	/* number of 4K pages needed for a page table */
+	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
 			__FUNCTION__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);
@@ -377,8 +376,8 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 			pr_debug("\toverlap at %d, skipping\n", i);
 			continue;
 		}
-		iommu->stab[i] = reg | (__pa(ptab) + n_pte_pages *
-					IOMMU_PAGE_SIZE * (i - start_seg));
+		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+					(i - start_seg));
 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
 	}
--
cgit v1.2.3

From 225d49050f9b6506f2f9df6b40e591ee93939d11 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:27 +1100
Subject: [POWERPC] Allow for different IOMMU page sizes in cell IOMMU code

Make some preliminary changes to cell_iommu_alloc_ptab() to allow it to
take the page size as a parameter rather than assuming IOMMU_PAGE_SIZE.

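Worked through for the two interesting cases (the numbers follow from
the macros in the diff below): with IO_SEGMENT_SHIFT at 28, a 4K page
shift gives IO_PAGENO_BITS(12) = 16, i.e. 65536 PTEs per segment, while
a 16M page shift gives IO_PAGENO_BITS(24) = 4, i.e. just 16 PTEs per
segment. The latter is why the code also rounds pages_per_segment up to
(1 << 12) / sizeof(unsigned long) = 512 entries, keeping each segment's
PTEs aligned to a 4K boundary.
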
Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 31 ++++++++++++++++++-------------
 1 file changed, 18 insertions(+), 13 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 7a861cb960d..b0e347e4933 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -113,7 +113,7 @@

 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT	28
-#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET	0x80000000ul
@@ -328,7 +328,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,

 static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
 		unsigned long base, unsigned long size, unsigned long gap_base,
-		unsigned long gap_size)
+		unsigned long gap_size, unsigned long page_shift)
 {
 	struct page *page;
 	int i;
@@ -337,7 +337,10 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,

 	start_seg = base >> IO_SEGMENT_SHIFT;
 	segments  = size >> IO_SEGMENT_SHIFT;
-	pages_per_segment = 1ull << IO_PAGENO_BITS;
+	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+	/* PTEs for each segment must start on a 4K boundary */
+	pages_per_segment = max(pages_per_segment,
+				(1 << 12) / sizeof(unsigned long));

 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
@@ -358,13 +361,12 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,

 	/* initialise the STEs */
 	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

-	if (IOMMU_PAGE_SIZE == 0x1000)
-		reg |= IOSTE_PS_4K;
-	else if (IOMMU_PAGE_SIZE == 0x10000)
-		reg |= IOSTE_PS_64K;
-	else {
-		extern void __unknown_page_size_error(void);
-		__unknown_page_size_error();
+	switch (page_shift) {
+	case 12: reg |= IOSTE_PS_4K;  break;
+	case 16: reg |= IOSTE_PS_64K; break;
+	case 20: reg |= IOSTE_PS_1M;  break;
+	case 24: reg |= IOSTE_PS_16M; break;
+	default: BUG();
 	}

 	gap_base = gap_base >> IO_SEGMENT_SHIFT;
@@ -429,7 +431,8 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 	unsigned long base, unsigned long size)
 {
 	cell_iommu_setup_stab(iommu, base, size, 0, 0);
-	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0);
+	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+					    IOMMU_PAGE_SHIFT);
 	cell_iommu_enable_hardware(iommu);
 }

@@ -886,7 +889,8 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	int i;
 	unsigned long base_pte, uaddr, *io_pte, *ptab;

-	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize);
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize,
+				     IOMMU_PAGE_SHIFT);

 	dma_iommu_fixed_base = fbase;

@@ -1008,7 +1012,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
 			dbase + dsize, fbase, fbase + fsize);

 		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
-		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0);
+		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+						    IOMMU_PAGE_SHIFT);
 		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
 			     fbase, fsize);
 		cell_iommu_enable_hardware(iommu);
--
cgit v1.2.3

From da40451bba23b51eaca4170a095891646ce72104 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Fri, 29 Feb 2008 18:33:29 +1100
Subject: [POWERPC] Convert the cell IOMMU fixed mapping to 16M IOMMU pages

The only tricky part is we need to adjust
the PTE insertion loop to cater for holes in the page table. The PTEs
for each segment start on a 4K boundary, so with 16M pages we have 16
PTEs per segment and then a gap to the next 4K page boundary.

It might be possible to allocate the PTEs for each segment separately,
saving the memory currently filling the gaps. However, we'd need to
check that's OK with the hardware, and that it actually saves memory.

Signed-off-by: Michael Ellerman
Signed-off-by: Arnd Bergmann
---
 arch/powerpc/platforms/cell/iommu.c | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b0e347e4933..20ea0e118f2 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -882,38 +882,45 @@ static void cell_dma_dev_setup_fixed(struct device *dev)
 	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }

+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+			   unsigned long base_pte)
+{
+	unsigned long segment, offset;
+
+	segment = addr >> IO_SEGMENT_SHIFT;
+	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+		  addr, ptab, segment, offset);
+
+	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	unsigned long base_pte, uaddr, *io_pte, *ptab;
+	unsigned long base_pte, uaddr, ioaddr, *ptab;

-	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize,
-				     IOMMU_PAGE_SHIFT);
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

 	dma_iommu_fixed_base = fbase;

-	/* convert from bytes into page table indices */
-	dbase = dbase >> IOMMU_PAGE_SHIFT;
-	dsize = dsize >> IOMMU_PAGE_SHIFT;
-	fbase = fbase >> IOMMU_PAGE_SHIFT;
-	fsize = fsize >> IOMMU_PAGE_SHIFT;
-
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

-	io_pte = ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

-	uaddr = 0;
-	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
 		/* Don't touch the dynamic region */
-		if (i >= dbase && i < (dbase + dsize)) {
+		ioaddr = uaddr + fbase;
+		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
 			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+		insert_16M_pte(uaddr, ptab, base_pte);
 	}

 	mb();
--
cgit v1.2.3

From d7f46190ef1048e48f71c8a7a60c2881c437d08d Mon Sep 17 00:00:00 2001
From: Li Yang
Date: Thu, 6 Mar 2008 18:42:35 +0800
Subject: [POWERPC] 83xx: Add local bus device nodes to MPC837xMDS device
 trees.

Signed-off-by: Li Yang
Signed-off-by: Kumar Gala
---
 arch/powerpc/platforms/83xx/mpc837x_mds.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8a9c2697360..64d17b0d645 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -39,12 +39,9 @@ static int mpc837xmds_usb_cfg(void)
 	if (ret)
 		return ret;
 	/* Map BCSR area */
-	np = of_find_node_by_name(NULL, "bcsr");
+	np = of_find_compatible_node(NULL, NULL, "fsl,mpc837xmds-bcsr");
 	if (np) {
-		struct resource res;
-
-		of_address_to_resource(np, 0, &res);
-		bcsr_regs = ioremap(res.start, res.end - res.start + 1);
+		bcsr_regs = of_iomap(np, 0);
 		of_node_put(np);
 	}
 	if (!bcsr_regs)
@@ -96,6 +93,7 @@ static void __init mpc837x_mds_setup_arch(void)
 static struct of_device_id mpc837x_ids[] = {
 	{ .type = "soc", },
 	{ .compatible = "soc", },
+	{ .compatible = "simple-bus", },
 	{},
 };
--
cgit v1.2.3

From ad562c71592ae76440c27bba9bfa9c16cc851560 Mon Sep 17 00:00:00 2001
From: Andy Fleming
Date: Fri, 7 Mar 2008 17:59:03 -0600
Subject: [POWERPC] 83xx: Make 83xx perfmon support selectable

Not all e300 cores support the performance monitors, and the ones that
don't will be confused by the mf/mtpmr instructions. This allows the
support to be optional, so the 8349 can turn it off while the 8379 can
turn it on.

Sadly, those aren't config options, so it will be left to the
defconfigs and the users to make that determination.

Signed-off-by: Andy Fleming
Signed-off-by: Kumar Gala
---
 arch/powerpc/platforms/Kconfig         | 1 -
 arch/powerpc/platforms/Kconfig.cputype | 7 ++++++-
 2 files changed, 6 insertions(+), 2 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0afd2259554..a578b966ecb 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -22,7 +22,6 @@ config PPC_83xx
 	select FSL_SOC
 	select MPC83xx
 	select IPIC
-	select FSL_EMB_PERFMON

 config PPC_86xx
 	bool "Freescale 86xx"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 73d81ce14b6..0c3face0ddb 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -113,7 +113,12 @@ config FSL_BOOKE
 	default y

 config FSL_EMB_PERFMON
-	bool
+	bool "Freescale Embedded Perfmon"
+	depends on E500 || PPC_83xx
+	help
+	  This is the Performance Monitor support found on the e500 core
+	  and some e300 cores (c3 and c4). Select this only if your
+	  core supports the Embedded Performance Monitor APU.

 config PTE_64BIT
 	bool
--
cgit v1.2.3

From ce7c191bca88aa2f942f70a6d6c6315739a81a32 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Tue, 4 Mar 2008 20:17:02 +1100
Subject: [POWERPC] spufs: don't (ab)use SCHED_IDLE

commit 4ef11014 introduced a usage of SCHED_IDLE to detect when a
context is within spu_run.

Instead of SCHED_IDLE (which has another meaning), add a flag to
sched_flags to tell if a context should be running.

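The flag follows the usual atomic-bitops pattern on the sched_flags
word, so no extra locking is needed (a sketch distilled from the diff
below):

	/* on entry to and exit from spu_run: */
	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	...
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);

	/* wherever a scheduling decision is made: */
	if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_add_to_rq(ctx);
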
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/run.c   | 3 ++-
 arch/powerpc/platforms/cell/spufs/sched.c | 4 ++--
 arch/powerpc/platforms/cell/spufs/spufs.h | 1 +
 3 files changed, 5 insertions(+), 3 deletions(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 6221968c2a3..cac69e11677 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -220,6 +220,7 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 		}
 	}

+	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
 	return 0;
 }

@@ -234,7 +235,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 	*npc = ctx->ops->npc_read(ctx);

 	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
-	ctx->policy = SCHED_IDLE;
+	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
 	spu_release(ctx);

 	if (signal_pending(current))
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 5d5f680cd0b..00528ef84ad 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -867,7 +867,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 	if (ctx->policy == SCHED_FIFO)
 		goto out;

-	if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
+	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 		goto out;

 	spu = ctx->spu;
@@ -877,7 +877,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
 		spu_unschedule(spu, ctx);
-		if (ctx->policy != SCHED_IDLE)
+		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {
 		spu_context_nospu_trace(spusched_tick__newslice, ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 2c2fe3c07d7..cdc515182f8 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -44,6 +44,7 @@ struct spu_gang;
 enum {
 	SPU_SCHED_NOTIFY_ACTIVE,
 	SPU_SCHED_WAS_ACTIVE,	/* was active upon spu_acquire_saved()  */
+	SPU_SCHED_SPU_RUN,	/* context is within spu_run */
 };

 struct spu_context {
--
cgit v1.2.3

From c368392a9951e6e25e2e2f9268153f1e9365e2c2 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Tue, 11 Mar 2008 12:46:18 +1100
Subject: [POWERPC] spufs: fix rescheduling of non-runnable contexts

At present, we can hit the BUG_ON in __spu_update_sched_info by reading
the regs file of a context between two calls to spu_run. The
spu_release_saved() called by spufs_regs_read() results in the (now
non-runnable) context being placed back on the run queue, so the next
call to spu_run ends up in the bug condition.

This change uses the SPU_SCHED_SPU_RUN flag to only reschedule a
context if it's still in spu_run().

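The offending sequence looks roughly like this (a reconstruction from
the description above):

	context owner			other thread
	spu_run() returns
					read regs file:
					  spu_acquire_saved()
					   - deactivates the context, sets
					     SPU_SCHED_WAS_ACTIVE
					  spu_release_saved()
					   - WAS_ACTIVE is set, so the now
					     non-runnable context goes back
					     on the run queue
	spu_run()
	 - hits the BUG_ON in
	   __spu_update_sched_info
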
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/context.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index cf6c2c89211..0ad83aeb70b 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -170,7 +170,8 @@ void spu_release_saved(struct spu_context *ctx)
 {
 	BUG_ON(ctx->state != SPU_STATE_SAVED);

-	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
+	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
+			test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 		spu_activate(ctx, 0);

 	spu_release(ctx);
--
cgit v1.2.3

From 98cddbfb3218925c35697562f7d9df692bf6a436 Mon Sep 17 00:00:00 2001
From: Tony Breeds
Date: Wed, 12 Mar 2008 10:48:48 +1100
Subject: [POWERPC] Fix arch/powerpc/platforms/powermac/pic.c when
 !CONFIG_ADB_PMU

When building arch/powerpc/platforms/powermac/pic.c with
!CONFIG_ADB_PMU, we get the following warning:

  arch/powerpc/platforms/powermac/pic.c: In function 'pmacpic_find_viaint':
  arch/powerpc/platforms/powermac/pic.c:623: warning: label 'not_found' defined but not used

This fixes it.

Signed-off-by: Tony Breeds
Signed-off-by: Paul Mackerras
---
 arch/powerpc/platforms/powermac/pic.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/powerpc/platforms')

diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 40736400ef8..829b8b02527 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -618,9 +618,9 @@ static int pmacpic_find_viaint(void)
 	if (np == NULL)
 		goto not_found;
 	viaint = irq_of_parse_and_map(np, 0);;
-#endif /* CONFIG_ADB_PMU */

 not_found:
+#endif /* CONFIG_ADB_PMU */
 	return viaint;
 }
--
cgit v1.2.3
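
The shape of the warning and the fix, reduced to a standalone example
(hypothetical CONFIG_FOO and helper functions, not the powermac code
itself): when the label sits outside the #ifdef but every goto to it
sits inside, building with the option disabled leaves an unused label.
Moving the #endif below the label makes the goto and the label appear
and disappear together:

	int find_irq(void)
	{
		int irq = 0;
	#ifdef CONFIG_FOO
		if (!lookup())		/* hypothetical helpers */
			goto not_found;
		irq = map_irq();
	not_found:
	#endif
		return irq;		/* no unused label without CONFIG_FOO */
	}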