author | Christoph Hellwig <hch@lst.de> | 2007-04-23 21:08:12 +0200 |
---|---|---|
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-04-23 21:18:54 +0200 |
commit | 7ec18ab923a2e377ecb05c74a2d38f457f79950f (patch) | |
tree | b722d8063bfc4b1b44ba67083649efab0c2e5a64 | |
parent | a475c2f43520cb095452201da57395000cfeb94c (diff) | |
[POWERPC] spufs: streamline locking for isolated spu setup
For quite a while now, spu state has been protected by a simple mutex instead
of the old rw_semaphore, and this means we can simplify the locking
around spu_setup_isolated a lot.
Instead of doing an spu_release before entering spu_setup_isolated and
then calling the complicated spu_acquire_exclusive, we can now simply
enter the function locked and in a guaranteed runnable state, so that the
only bit of spu_acquire_exclusive that's left is the call to
spu_unmap_mappings.
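In sketch form (simplified from the patch below; the MFC purge, loader transfer
and status checks are elided, so this is not meant to compile on its own), the
entry of spu_setup_isolated now reduces to:

```c
/* Sketch of the new entry path: the caller already holds ctx->state_mutex
 * and the context is runnable, so no acquire/activate dance is needed. */
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret = -ENODEV;

	if (!isolated_loader)
		goto out;

	/*
	 * Shut out userspace: invalidate all ptes so that the page fault
	 * handlers block on the state_mutex we already hold.  This is the
	 * only piece of the old spu_acquire_exclusive that survives.
	 */
	spu_unmap_mappings(ctx);

	/* ... MFC purge, loader transfer and status checks as in the patch ... */
out:
	return ret;
}
```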
Similarly, there's no more need to unlock and reacquire the state_mutex
when spu_setup_isolated is done; instead we always return with the
lock held and only drop it in spu_run_init in the failure case.
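On the caller side this leaves only a small pattern in spu_run_init (again a
sketch: the nested conditions are flattened here and the surrounding runcntl
handling is omitted):

```c
/* Sketch: spu_run_init enters with ctx->state_mutex held via
 * spu_acquire_runnable().  spu_setup_isolated keeps the lock, so only
 * its failure path needs an explicit spu_release before returning. */
if ((ctx->flags & SPU_CREATE_ISOLATE) &&
    !(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
	ret = spu_setup_isolated(ctx);	/* called and left locked */
	if (ret)
		spu_release(ctx);	/* failure: hand back an unlocked context */
}
```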
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 40 |
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/run.c | 30 |
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 1 |
3 files changed, 16 insertions, 55 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 065147fb1cc..ce17a284718 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -122,46 +122,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
 }
 
 /**
- * spu_acquire_exclusive - lock spu contex and protect against userspace access
- * @ctx: spu contex to lock
- *
- * Note:
- *	Returns 0 and with the context locked on success
- *	Returns negative error and with the context _unlocked_ on failure.
- */
-int spu_acquire_exclusive(struct spu_context *ctx)
-{
-	int ret = -EINVAL;
-
-	spu_acquire(ctx);
-	/*
-	 * Context is about to be freed, so we can't acquire it anymore.
-	 */
-	if (!ctx->owner)
-		goto out_unlock;
-
-	if (ctx->state == SPU_STATE_SAVED) {
-		ret = spu_activate(ctx, 0);
-		if (ret)
-			goto out_unlock;
-	} else {
-		/*
-		 * We need to exclude userspace access to the context.
-		 *
-		 * To protect against memory access we invalidate all ptes
-		 * and make sure the pagefault handlers block on the mutex.
-		 */
-		spu_unmap_mappings(ctx);
-	}
-
-	return 0;
-
- out_unlock:
-	spu_release(ctx);
-	return ret;
-}
-
-/**
  * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
  * @ctx: spu contex to lock
  *
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index f95a611ca36..7df5202c9a9 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -63,13 +63,18 @@ static int spu_setup_isolated(struct spu_context *ctx)
 	const u32 status_loading = SPU_STATUS_RUNNING
 		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
 
+	ret = -ENODEV;
 	if (!isolated_loader)
-		return -ENODEV;
-
-	ret = spu_acquire_exclusive(ctx);
-	if (ret)
 		goto out;
 
+	/*
+	 * We need to exclude userspace access to the context.
+	 *
+	 * To protect against memory access we invalidate all ptes
+	 * and make sure the pagefault handlers block on the mutex.
+	 */
+	spu_unmap_mappings(ctx);
+
 	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
 	/* purge the MFC DMA queue to ensure no spurious accesses before we
@@ -82,7 +87,7 @@ static int spu_setup_isolated(struct spu_context *ctx)
 			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
 					__FUNCTION__);
 			ret = -EIO;
-			goto out_unlock;
+			goto out;
 		}
 		cond_resched();
 	}
@@ -119,12 +124,15 @@ static int spu_setup_isolated(struct spu_context *ctx)
 		pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 		ret = -EACCES;
+		goto out_drop_priv;
+	}
 
-	} else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
 		/* This isn't allowed by the CBEA, but check anyway */
 		pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
 		ret = -EINVAL;
+		goto out_drop_priv;
 	}
 
 out_drop_priv:
@@ -132,8 +140,6 @@ out_drop_priv:
 	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
-out_unlock:
-	spu_release(ctx);
 out:
 	return ret;
 }
@@ -149,13 +155,9 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
-			/* Need to release ctx, because spu_setup_isolated will
-			 * acquire it exclusively.
-			 */
-			spu_release(ctx);
 			ret = spu_setup_isolated(ctx);
-			if (!ret)
-				ret = spu_acquire_runnable(ctx, 0);
+			if (ret)
+				spu_release(ctx);
 		}
 
 		/* if userspace has set the runcntrl register (eg, to issue an
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0fb366d9d25..cae2ad435b0 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -192,7 +192,6 @@ void spu_unmap_mappings(struct spu_context *ctx);
 void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
-int spu_acquire_exclusive(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);