From d58831375d68a3bd39d5ebab9eca711fbb4ee108 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Tue, 26 Feb 2008 13:31:42 +1100
Subject: [POWERPC] spufs: fix context destruction during psmap fault

We have a small window where a spu context may be destroyed while
we're servicing a page fault (from another thread) to the context's
problem state mapping. After we up_read() the mmap_sem, it's possible
that the context is destroyed by its owning thread, and so the later
references to ctx are invalid. This can manifest as a deadlock on the
(now freed) context state mutex.

This change adds a reference to the context before we release the
mmap_sem, so that the context cannot be destroyed.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/file.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c66c3756970..f7a7e8635fb 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -366,6 +366,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return NOPFN_SIGBUS;
 
+	/*
+	 * Because we release the mmap_sem, the context may be destroyed while
+	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
+	 * in the meantime.
+	 */
+	get_spu_context(ctx);
+
 	/*
 	 * We have to wait for context to be loaded before we have
 	 * pages to hand out to the user, but we don't want to wait
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	 * hanged.
 	 */
 	if (spu_acquire(ctx))
-		return NOPFN_REFAULT;
+		goto refault;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 
 	if (!ret)
 		spu_release(ctx);
+
+refault:
+	put_spu_context(ctx);
 	return NOPFN_REFAULT;
 }
 
-- cgit v1.2.3

From 0111a701867a796a7ca6ecbc385e4befc9f35066 Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Wed, 27 Feb 2008 19:08:13 +1100
Subject: [POWERPC] spufs: fix invalid scheduling of forgotten contexts

At present, we have a situation where a context with no owner is
re-scheduled by spu_forget:

 Thread 1: reading regs file        Thread 2: context owner

                                    spu_forget()
                                     - ctx->owner = NULL
                                     - set SPU_SCHED_WAS_ACTIVE

 spu_acquire_saved()
  - context is in saved state

 spu_release_saved()
  - SPU_SCHED_WAS_ACTIVE is set,
    so spu_activate() the context,
    which now has no owner

In spu_forget(), we shouldn't be requesting a re-schedule by setting
SPU_SCHED_WAS_ACTIVE. This change removes the set_bit in spu_forget(),
so that spu_release_saved() doesn't reinsert this destroyed context
onto the run queue.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/context.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 133995ed5cc..cf6c2c89211 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 
 	/*
 	 * This is basically an open-coded spu_acquire_saved, except that
-	 * we don't acquire the state mutex interruptible.
+	 * we don't acquire the state mutex interruptible, and we don't
+	 * want this context to be rescheduled on release.
	 */
 	mutex_lock(&ctx->state_mutex);
-	if (ctx->state != SPU_STATE_SAVED) {
-		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-	}
 
 	mm = ctx->owner;
 	ctx->owner = NULL;
-- cgit v1.2.3

From 71791bee90dd29b292c7e55c1c00857578c912bd Mon Sep 17 00:00:00 2001
From: Jeremy Kerr
Date: Mon, 25 Feb 2008 14:58:37 +1100
Subject: [POWERPC] spufs: fix order of sputrace thread IDs

Currently, we get the following output from sputrace:

[5.097935954] 1606: spufs_ps_nopfn__enter (thread = 1605, spu = -1)
[5.097958164] 1606: spufs_ps_nopfn__insert (thread = 1605, spu = 15)
[5.097973529] 1607: spufs_ps_nopfn__enter (thread = 1605, spu = -1)
[5.097989174] 1607: spufs_ps_nopfn__insert (thread = 1605, spu = 14)

This leads me to believe that 160[67] is the current thread ID, and
1605 is the context backing the psmap. However, the 'current' and
'owner' tids are reversed - the 'current' tid is on the right.

This change puts the current thread ID in the left-hand column
instead, and renames the right-hand one to 'ctxthread'.

Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/sputrace.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 01974f7776e..79aa773f3c9 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
 		ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
 	return snprintf(tbuf, n,
-		"[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+		"[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
 		(unsigned long) tv.tv_sec,
 		(unsigned long) tv.tv_nsec,
-		t->owner_tid,
-		t->name, t->curr_tid,
+		t->curr_tid,
+		t->name,
+		t->owner_tid,
 		t->number);
 }
 
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
 	{ "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
 	{ "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
 	{ "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+	{ "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
-- cgit v1.2.3

From fae9ca791507876c3ccaa8ab686b2ce42dc7a560 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 29 Feb 2008 15:16:48 +1100
Subject: [POWERPC] spufs: synchronize IRQ when disabling

There is a small race between the context save procedure and the SPU
interrupt handling: we expect all interrupt processing to have
finished once we have disabled the interrupts, but an interrupt may
still be being processed on another CPU.

The obvious fix is to call synchronize_irq() after disabling the
interrupts at the start of the context save procedure, to make sure we
never access the SPU any more during an ongoing save, or even after
that.

Thanks to Benjamin Herrenschmidt for pointing this out.

Acked-by: Benjamin Herrenschmidt
Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/switch.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1f..e9dc7a55d1b 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 * Write INT_MASK_class1 with value of 0.
	 * Save INT_Mask_class2 in CSA.
 	 * Write INT_MASK_class2 with value of 0.
+	 * Synchronize all three interrupts to be sure
+	 * we no longer execute a handler on another CPU.
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
-- cgit v1.2.3

From cc4b7c1814c9ad375e8167ea4a9ec4a0ec1ada04 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 26 Feb 2008 07:01:56 +0100
Subject: [POWERPC] spufs: invalidate SLB translation before adding a new entry

When we replace an SLB entry in the MFC after using up all the
available entries, there is a short window in which an incorrect entry
is marked as valid.

The problem is that the 'valid' bit is stored in the ESID, which is
always written after the VSID. Overwriting the VSID first will make
the original ESID entry point to the new VSID, which means that any
concurrent DMA accessing the old ESID ends up being redirected to the
new virtual address. A few cycles later, we write the new ESID and
everything is fine again.

That race can be closed by writing a zero entry to the ESID first,
which makes sure that the VSID is not accessed until we write the new
ESID.

Note that we don't actually need to invalidate the SLB entry using the
invalidation register, which would also flush any ERAT entries for
that segment, because the segment translation does not become invalid
but is only removed from the SLB cache.

Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 87eb07f94c5..cfc28e93c82 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -148,7 +148,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 			__func__, slbe, slb->vsid, slb->esid);
 
 	out_be64(&priv2->slb_index_W, slbe);
+	/* set invalid before writing vsid */
+	out_be64(&priv2->slb_esid_RW, 0);
+	/* now it's safe to write the vsid */
 	out_be64(&priv2->slb_vsid_RW, slb->vsid);
+	/* setting the new esid makes the entry valid again */
 	out_be64(&priv2->slb_esid_RW, slb->esid);
 }
 
-- cgit v1.2.3

From c92a1acb675058375cc508ad024c33358b42d766 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 28 Feb 2008 06:06:30 +0100
Subject: [POWERPC] spufs: serialize SLB invalidation against SLB loading

There is a potential race between flushes of the entire SLB in the MFC
and the point where new entries are being established. The problem is
that we might put an ESID entry into the MFC SLB when the VSID entry
has just been cleared by the global flush.

This can be circumvented by holding the register_lock throughout both
the flushing and the creation of SLB entries.

Signed-off-by: Arnd Bergmann
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spu_base.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index cfc28e93c82..712001f6b7d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	unsigned long flags;
 
+	spin_lock_irqsave(&spu->register_lock, flags);
 	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+	spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
@@ -294,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		nr_slbs++;
 	}
 
+	spin_lock_irq(&spu->register_lock);
 	/* Add the set of SLBs */
 	for (i = 0; i < nr_slbs; i++)
 		spu_load_slb(spu, i, &slbs[i]);
+	spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
@@ -341,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
-	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-			dar, dsisr);
 
 	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);
 
+	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
 
-- cgit v1.2.3

From 2a58aa33daef37134c8a43dca0b7578c3fa7f993 Mon Sep 17 00:00:00 2001
From: Andre Detsch
Date: Mon, 25 Feb 2008 15:07:42 -0300
Subject: [POWERPC] spufs: fix use time accounting on SPE-overcommit

The spu_runcntl_RW register is restored within the spu_restore
function. So, at the end of spu_bind_context, the SPU context is not
just loaded, but running.

This change corrects the state switch to account the time as USER.

Signed-off-by: Andre Detsch
Signed-off-by: Jeremy Kerr
---
 arch/powerpc/platforms/cell/spufs/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3a5972117de..5d5f680cd0b 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
-	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
-- cgit v1.2.3
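
Postscript (editor's note): the first patch in this series relies on a general
idiom - pin an object with an extra reference before dropping the lock that
otherwise guarantees its lifetime, then drop the reference after the blocking
work. The sketch below shows the same idiom in plain userspace C; it is not
spufs or kernel code, and every name in it (struct object, object_get,
object_put, slow_operation) is a hypothetical stand-in. The rwlock plays the
role of mmap_sem, and the get/put pair plays the role of
get_spu_context()/put_spu_context().

/* refcount_sketch.c - illustrative only, not spufs code */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object, standing in for struct spu_context. */
struct object {
	pthread_mutex_t lock;	/* protects refcount */
	int refcount;
	int data;
};

static void object_get(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);
	obj->refcount++;
	pthread_mutex_unlock(&obj->lock);
}

static void object_put(struct object *obj)
{
	int free_it;

	pthread_mutex_lock(&obj->lock);
	free_it = (--obj->refcount == 0);
	pthread_mutex_unlock(&obj->lock);

	if (free_it) {		/* last reference gone: safe to destroy */
		pthread_mutex_destroy(&obj->lock);
		free(obj);
	}
}

/*
 * Analogue of the psmap fault path: we must drop 'outer' (the stand-in
 * for mmap_sem) before doing potentially blocking work, so we pin the
 * object first. Without the get/put pair, the owner could free 'obj'
 * as soon as the rwlock is released, and the later access would be a
 * use-after-free.
 */
static void slow_operation(pthread_rwlock_t *outer, struct object *obj)
{
	object_get(obj);		/* pin before releasing the lock */
	pthread_rwlock_unlock(outer);

	printf("working on data = %d\n", obj->data);	/* blocking work */

	object_put(obj);	/* may free obj if the owner already let go */
}

int main(void)
{
	pthread_rwlock_t outer;
	struct object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	pthread_rwlock_init(&outer, NULL);
	pthread_mutex_init(&obj->lock, NULL);
	obj->refcount = 1;	/* the owner's reference */
	obj->data = 42;

	pthread_rwlock_rdlock(&outer);
	slow_operation(&outer, obj);	/* releases 'outer' internally */

	object_put(obj);	/* owner drops its reference; obj is freed */
	return 0;
}

Compiled with 'gcc -pthread refcount_sketch.c', this frees the object exactly
once regardless of which side drops its reference last, which is the same
guarantee the patch needs for the context across the spu_acquire() wait.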