Commit 650f8b02, authored by Christoph Hellwig, committed by Arnd Bergmann

[POWERPC] spufs: simplify state_mutex

The r/w semaphore to lock the spus was overkill and can be replaced
with a mutex to make it faster, simpler and easier to debug.  It also
helps to allow making most spufs interruptible in future patches.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Parent 202557d2
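As background for the diff below: with the r/w semaphore, a path that starts out only reading the context state but may end up changing it has to drop the shared lock, re-acquire it exclusively (re-checking the state, which can have changed in between), and then downgrade again before returning with the lock held. With a mutex every acquisition is exclusive, so that whole dance disappears. The sketch below is an illustration modelled on the spu_acquire_saved() hunk further down, assuming the spufs context definitions from the files touched by this patch; the function names are illustrative and it is not a verbatim copy of either version.

    /* Old shape forced by the r/w semaphore: read lock, drop, write lock,
     * re-check, downgrade -- with a window in which the state can change. */
    void acquire_saved_rwsem(struct spu_context *ctx)
    {
            down_read(&ctx->state_sema);
            if (ctx->state == SPU_STATE_SAVED)
                    return;                 /* already saved, keep the read lock */
            up_read(&ctx->state_sema);

            down_write(&ctx->state_sema);   /* state may have changed by now */
            if (ctx->state == SPU_STATE_RUNNABLE)
                    spu_deactivate(ctx);
            downgrade_write(&ctx->state_sema);
    }

    /* New shape with the mutex: one exclusive acquisition, no re-check window. */
    void acquire_saved_mutex(struct spu_context *ctx)
    {
            mutex_lock(&ctx->state_mutex);
            if (ctx->state == SPU_STATE_RUNNABLE)
                    spu_deactivate(ctx);
    }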
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	}
 	spin_lock_init(&ctx->mmio_lock);
 	kref_init(&ctx->kref);
-	init_rwsem(&ctx->state_sema);
+	mutex_init(&ctx->state_mutex);
 	init_MUTEX(&ctx->run_sema);
 	init_waitqueue_head(&ctx->ibox_wq);
 	init_waitqueue_head(&ctx->wbox_wq);
@@ -65,9 +65,9 @@ void destroy_spu_context(struct kref *kref)
 {
 	struct spu_context *ctx;
 	ctx = container_of(kref, struct spu_context, kref);
-	down_write(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 	spu_deactivate(ctx);
-	up_write(&ctx->state_sema);
+	mutex_unlock(&ctx->state_mutex);
 	spu_fini_csa(&ctx->csa);
 	if (ctx->gang)
 		spu_gang_remove_ctx(ctx->gang, ctx);
@@ -98,12 +98,12 @@ void spu_forget(struct spu_context *ctx)
 
 void spu_acquire(struct spu_context *ctx)
 {
-	down_read(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 }
 
 void spu_release(struct spu_context *ctx)
 {
-	up_read(&ctx->state_sema);
+	mutex_unlock(&ctx->state_mutex);
 }
 
 void spu_unmap_mappings(struct spu_context *ctx)
@@ -128,7 +128,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
 {
 	int ret = 0;
 
-	down_write(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 	/* ctx is about to be freed, can't acquire any more */
 	if (!ctx->owner) {
 		ret = -EINVAL;
@@ -146,7 +146,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
 
 out:
 	if (ret)
-		up_write(&ctx->state_sema);
+		mutex_unlock(&ctx->state_mutex);
 	return ret;
 }
 
@@ -154,14 +154,12 @@ int spu_acquire_runnable(struct spu_context *ctx)
 {
 	int ret = 0;
 
-	down_read(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 	if (ctx->state == SPU_STATE_RUNNABLE) {
 		ctx->spu->prio = current->prio;
 		return 0;
 	}
-	up_read(&ctx->state_sema);
 
-	down_write(&ctx->state_sema);
 	/* ctx is about to be freed, can't acquire any more */
 	if (!ctx->owner) {
 		ret = -EINVAL;
@@ -174,29 +172,18 @@ int spu_acquire_runnable(struct spu_context *ctx)
 			goto out;
 	}
 
-	downgrade_write(&ctx->state_sema);
 	/* On success, we return holding the lock */
-
 	return ret;
 out:
 	/* Release here, to simplify calling code. */
-	up_write(&ctx->state_sema);
-
+	mutex_unlock(&ctx->state_mutex);
 	return ret;
 }
 
 void spu_acquire_saved(struct spu_context *ctx)
 {
-	down_read(&ctx->state_sema);
+	mutex_lock(&ctx->state_mutex);
 
-	if (ctx->state == SPU_STATE_SAVED)
-		return;
-
-	up_read(&ctx->state_sema);
-	down_write(&ctx->state_sema);
-
 	if (ctx->state == SPU_STATE_RUNNABLE)
 		spu_deactivate(ctx);
-
-	downgrade_write(&ctx->state_sema);
 }
@@ -233,11 +233,11 @@ static void spu_prio_wait(struct spu_context *ctx, u64 flags)
 	spu_add_wq(wq, &wait, prio);
 
 	if (!signal_pending(current)) {
-		up_write(&ctx->state_sema);
+		mutex_unlock(&ctx->state_mutex);
 		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
 			 current->pid, current->prio);
 		schedule();
-		down_write(&ctx->state_sema);
+		mutex_lock(&ctx->state_mutex);
 	}
 
 	spu_del_wq(wq, &wait, prio);
@@ -334,7 +334,7 @@ void spu_yield(struct spu_context *ctx)
 	struct spu *spu;
 	int need_yield = 0;
 
-	if (down_write_trylock(&ctx->state_sema)) {
+	if (mutex_trylock(&ctx->state_mutex)) {
 		if ((spu = ctx->spu) != NULL) {
 			int best = sched_find_first_bit(spu_prio->bitmap);
 			if (best < MAX_PRIO) {
@@ -346,7 +346,7 @@ void spu_yield(struct spu_context *ctx)
 				spu->prio = MAX_PRIO;
 			}
 		}
-		up_write(&ctx->state_sema);
+		mutex_unlock(&ctx->state_mutex);
 	}
 	if (unlikely(need_yield))
 		yield();
@@ -23,7 +23,7 @@
 #define SPUFS_H
 
 #include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 
@@ -53,7 +53,7 @@ struct spu_context {
 	u64 object_id;		/* user space pointer for oprofile */
 
 	enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
-	struct rw_semaphore state_sema;
+	struct mutex state_mutex;
 	struct semaphore run_sema;
 
 	struct mm_struct *owner;
@@ -173,7 +173,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
 
 static inline void spu_release_exclusive(struct spu_context *ctx)
 {
-	up_write(&ctx->state_sema);
+	mutex_unlock(&ctx->state_mutex);
 }
 
 int spu_activate(struct spu_context *ctx, u64 flags);
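Two of the commit message's claims can also be made concrete. A mutex carries owner and debug information under CONFIG_DEBUG_MUTEXES, and callers that require the state lock can assert it with mutex_is_locked(); and because the mutex API has an interruptible acquire, a later patch can let an acquire helper fail with -EINTR on a signal instead of blocking unconditionally. The snippet below is a hypothetical sketch of both ideas against the structures changed above; it is not code from this commit or from the follow-up patches, and the helper names are invented for illustration.

    /* Hypothetical sketch only -- not part of this commit. */

    /* "easier to debug": assert that the caller really holds the state lock. */
    static void assert_state_locked(struct spu_context *ctx)
    {
            BUG_ON(!mutex_is_locked(&ctx->state_mutex));
    }

    /* "interruptible in future patches": the mutex API offers an
     * interruptible acquire, so an acquire helper could return -EINTR
     * when a signal arrives instead of sleeping uninterruptibly. */
    static int acquire_state_interruptible(struct spu_context *ctx)
    {
            return mutex_lock_interruptible(&ctx->state_mutex);
    }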