author    Christoph Hellwig <hch@lst.de>    2007-04-23 21:08:11 +0200
committer Arnd Bergmann <arnd@klappe.arndb.de>    2007-04-23 21:18:54 +0200
commit    a475c2f43520cb095452201da57395000cfeb94c (patch)
tree      0cfe49111b15cf20b03d308ec8b5a8b5210b0363 /arch/powerpc
parent    390c53430498c9973e015432806edd53b2efe6c6 (diff)
[POWERPC] spufs: remove woken threads from the runqueue early
A single context should only be woken once, and we should not have more wakeups for a given priority than the number of contexts on that runqueue position. Also add some asserts to trap future problems in this area more easily.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
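The fix boils down to a "dequeue under the runqueue lock, then wake" pattern: once a wakeup has picked a context it is already off the runqueue, so a later wakeup for the same priority cannot select it again. Below is a minimal userspace sketch of that pattern, not the spufs code itself; the waiter/runq/wake_one() names and the pthread mutex are illustrative stand-ins, and the woken counter merely models wake_up(&ctx->stop_wq).

#include <pthread.h>
#include <stdio.h>

struct waiter {
	struct waiter *next;
	int woken;                      /* how often this waiter was picked */
};

static struct waiter *runq;         /* waiters queued for an SPU */
static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

static void wake_one(void)
{
	struct waiter *w;

	pthread_mutex_lock(&runq_lock);
	w = runq;
	if (w) {
		runq = w->next;         /* take it off the queue first ...              */
		w->next = NULL;
		w->woken++;             /* ... so no later wakeup can pick it again     */
	}
	pthread_mutex_unlock(&runq_lock);
}

int main(void)
{
	struct waiter a = { 0 }, b = { 0 };

	/* queue holds a, then b */
	b.next = NULL;
	runq = &b;
	a.next = &b;
	runq = &a;

	wake_one();
	wake_one();
	wake_one();                     /* spurious extra wakeup: queue already empty, nothing re-woken */

	printf("a woken %d time(s), b woken %d time(s)\n", a.woken, b.woken);
	return !(a.woken == 1 && b.woken == 1);
}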
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c |  2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   | 44
2 files changed, 19 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index b3954aba424..065147fb1cc 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -52,6 +52,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	ctx->state = SPU_STATE_SAVED;
 	ctx->ops = &spu_backing_ops;
 	ctx->owner = get_task_mm(current);
+	INIT_LIST_HEAD(&ctx->rq);
 	if (gang)
 		spu_gang_add_ctx(gang, ctx);
 	ctx->rt_priority = current->rt_priority;
@@ -76,6 +77,7 @@ void destroy_spu_context(struct kref *kref)
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
+	BUG_ON(!list_empty(&ctx->rq));
 	kfree(ctx);
 }
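The two context.c hunks above are what make early removal safe to do from more than one place: ctx->rq is self-linked from allocation (INIT_LIST_HEAD()), removal goes through list_del_init(), so list_empty(&ctx->rq) is always a valid "is this context queued?" test, and destroy_spu_context() can assert that a context is never freed while still on the runqueue. The following self-contained sketch models that kernel list idiom; the helpers are simplified local stand-ins for <linux/list.h>, written here only so the example compiles on its own.

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* self-linked node/head: "not on any list" */
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);              /* leave the node self-linked again */
}

int main(void)
{
	struct list_head runq, ctx_rq;

	INIT_LIST_HEAD(&runq);
	INIT_LIST_HEAD(&ctx_rq);        /* as in alloc_spu_context() */

	assert(list_empty(&ctx_rq));    /* freshly allocated: not queued */
	list_add_tail(&ctx_rq, &runq);  /* spu_add_to_rq() */
	assert(!list_empty(&ctx_rq));   /* queued */
	list_del_init(&ctx_rq);         /* __spu_del_from_rq() */
	assert(list_empty(&ctx_rq));    /* dequeued: BUG_ON() in destroy would pass */

	list_del_init(&ctx_rq);         /* removing a second time is harmless */
	assert(list_empty(&ctx_rq));

	puts("ok");
	return 0;
}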
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1582d764523..876828cc95a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -245,6 +245,14 @@ static void spu_add_to_rq(struct spu_context *ctx)
 	spin_unlock(&spu_prio->runq_lock);
 }
 
+static void __spu_del_from_rq(struct spu_context *ctx, int prio)
+{
+	if (!list_empty(&ctx->rq))
+		list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[prio]))
+		clear_bit(ctx->prio, spu_prio->bitmap);
+}
+
 /**
  * spu_del_from_rq - remove a context from the runqueue
  * @ctx: context to remove
@@ -252,33 +260,10 @@ static void spu_add_to_rq(struct spu_context *ctx)
 static void spu_del_from_rq(struct spu_context *ctx)
 {
 	spin_lock(&spu_prio->runq_lock);
-	list_del_init(&ctx->rq);
-	if (list_empty(&spu_prio->runq[ctx->prio]))
-		clear_bit(ctx->prio, spu_prio->bitmap);
+	__spu_del_from_rq(ctx, ctx->prio);
 	spin_unlock(&spu_prio->runq_lock);
 }
 
-/**
- * spu_grab_context - remove one context from the runqueue
- * @prio: priority of the context to be removed
- *
- * This function removes one context from the runqueue for priority @prio.
- * If there is more than one context with the given priority the first
- * task on the runqueue will be taken.
- *
- * Returns the spu_context it just removed.
- *
- * Must be called with spu_prio->runq_lock held.
- */
-static struct spu_context *spu_grab_context(int prio)
-{
-	struct list_head *rq = &spu_prio->runq[prio];
-
-	if (list_empty(rq))
-		return NULL;
-	return list_entry(rq->next, struct spu_context, rq);
-}
-
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
@@ -309,9 +294,14 @@ static void spu_reschedule(struct spu *spu)
 	spin_lock(&spu_prio->runq_lock);
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
-		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx)
-			wake_up(&ctx->stop_wq);
+		struct list_head *rq = &spu_prio->runq[best];
+		struct spu_context *ctx;
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx, best);
+		wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
 }
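For reference, the invariant __spu_del_from_rq() keeps is that a bit in spu_prio->bitmap is set exactly while the matching per-priority list is non-empty; that is what allows the rewritten spu_reschedule() to BUG_ON(list_empty(rq)) right after sched_find_first_bit(). The sketch below models that bitmap-plus-runqueue bookkeeping in userspace; MAX_PRIO, find_first_prio() and the other helpers are simplified stand-ins, not the kernel implementations.

#include <stdio.h>

#define MAX_PRIO 140

static unsigned long long bitmap[(MAX_PRIO + 63) / 64]; /* one bit per priority */
static int nqueued[MAX_PRIO];                           /* contexts queued per priority */

static void set_prio_bit(int b)   { bitmap[b / 64] |=   1ULL << (b % 64); }
static void clear_prio_bit(int b) { bitmap[b / 64] &= ~(1ULL << (b % 64)); }

/* simplified stand-in for sched_find_first_bit(): best (lowest) set priority */
static int find_first_prio(void)
{
	for (int b = 0; b < MAX_PRIO; b++)
		if (bitmap[b / 64] & (1ULL << (b % 64)))
			return b;
	return MAX_PRIO;
}

static void add_to_rq(int prio)
{
	nqueued[prio]++;
	set_prio_bit(prio);             /* queue for this priority is now non-empty */
}

static void del_from_rq(int prio)
{
	if (nqueued[prio] > 0 && --nqueued[prio] == 0)
		clear_prio_bit(prio);   /* last context gone: drop the priority bit */
}

int main(void)
{
	int best;

	add_to_rq(120);
	add_to_rq(100);
	add_to_rq(100);

	/* waker: pick the best priority, dequeue one context, repeat */
	while ((best = find_first_prio()) < MAX_PRIO) {
		printf("wake one context at priority %d\n", best);
		del_from_rq(best);
	}
	return 0;
}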