Commit ee2d7340 authored by Arnd Bergmann, committed by Paul Mackerras

[POWERPC] spufs: Use SPU master control to prevent wild SPU execution

When the user changes the runcontrol register, an SPU might be
running without a process being attached to it and waiting for
events. In order to prevent this, make sure we always disable
the priv1 master control when we're not inside of spu_run.
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 3692dc66
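The change relies on the fact that the SPU only executes when both the privileged master run control bit in MFC state register 1 and the problem-state runcntl register allow it. spu_run (and spu_setup_isolated) now bracket the execution window with master_start()/master_stop(), so a runcntl write through a stray mapping cannot start a wild SPU while no process is waiting on it. The following is a minimal standalone sketch of that interlock, not kernel code; the toy_spu struct, the toy_* helpers, and the mask values are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for MFC_STATE1_MASTER_RUN_CONTROL_MASK and
   SPU_RUNCNTL_RUNNABLE; the values here are invented for this sketch. */
#define TOY_MASTER_RUN_CONTROL_MASK 0x1ULL
#define TOY_RUNCNTL_RUNNABLE        0x1U

/* Toy model of the two registers involved. */
struct toy_spu {
        uint64_t mfc_sr1;   /* privileged state register 1, kernel-only */
        uint32_t runcntl;   /* problem-state run control, user-mappable */
};

/* The SPU can only execute when BOTH the privileged master bit and the
   user-visible runcntl bit are set; that is the property the patch uses. */
static bool toy_spu_would_run(const struct toy_spu *spu)
{
        return (spu->mfc_sr1 & TOY_MASTER_RUN_CONTROL_MASK) &&
               (spu->runcntl & TOY_RUNCNTL_RUNNABLE);
}

static void toy_master_start(struct toy_spu *spu)
{
        spu->mfc_sr1 |= TOY_MASTER_RUN_CONTROL_MASK;
}

static void toy_master_stop(struct toy_spu *spu)
{
        spu->mfc_sr1 &= ~TOY_MASTER_RUN_CONTROL_MASK;
}

int main(void)
{
        struct toy_spu spu = { 0, 0 };

        /* Outside of spu_run, poking runcntl has no effect ... */
        spu.runcntl = TOY_RUNCNTL_RUNNABLE;
        printf("outside spu_run: would run = %d\n", toy_spu_would_run(&spu));

        /* ... because only the spu_run window sets the master bit. */
        toy_master_start(&spu);
        printf("inside spu_run:  would run = %d\n", toy_spu_would_run(&spu));

        toy_master_stop(&spu);
        printf("after spu_run:   would run = %d\n", toy_spu_would_run(&spu));
        return 0;
}

In the actual patch the bit is toggled with spu_mfc_sr1_get()/spu_mfc_sr1_set() when the context is loaded on hardware, and by editing csa->priv1.mfc_sr1_RW in the saved register image when it is scheduled out, as the hunks below show.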
@@ -280,9 +280,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
         spin_unlock(&ctx->csa.register_lock);
 }
 
-static void spu_backing_runcntl_stop(struct spu_context *ctx)
+static void spu_backing_master_start(struct spu_context *ctx)
 {
-        spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+        struct spu_state *csa = &ctx->csa;
+        u64 sr1;
+
+        spin_lock(&csa->register_lock);
+        sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+        csa->priv1.mfc_sr1_RW = sr1;
+        spin_unlock(&csa->register_lock);
+}
+
+static void spu_backing_master_stop(struct spu_context *ctx)
+{
+        struct spu_state *csa = &ctx->csa;
+        u64 sr1;
+
+        spin_lock(&csa->register_lock);
+        sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+        csa->priv1.mfc_sr1_RW = sr1;
+        spin_unlock(&csa->register_lock);
 }
 
 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -347,7 +364,8 @@ struct spu_context_ops spu_backing_ops = {
         .status_read = spu_backing_status_read,
         .get_ls = spu_backing_get_ls,
         .runcntl_write = spu_backing_runcntl_write,
-        .runcntl_stop = spu_backing_runcntl_stop,
+        .master_start = spu_backing_master_start,
+        .master_stop = spu_backing_master_stop,
         .set_mfc_query = spu_backing_set_mfc_query,
         .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
         .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
...
@@ -122,29 +122,29 @@ void spu_unmap_mappings(struct spu_context *ctx)
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
         int ret = 0;
 
         down_write(&ctx->state_sema);
         /* ctx is about to be freed, can't acquire any more */
         if (!ctx->owner) {
                 ret = -EINVAL;
                 goto out;
         }
 
         if (ctx->state == SPU_STATE_SAVED) {
                 ret = spu_activate(ctx, 0);
                 if (ret)
                         goto out;
                 ctx->state = SPU_STATE_RUNNABLE;
         } else {
                 /* We need to exclude userspace access to the context. */
                 spu_unmap_mappings(ctx);
         }
 
 out:
         if (ret)
                 up_write(&ctx->state_sema);
         return ret;
 }
 
 int spu_acquire_runnable(struct spu_context *ctx)
...
@@ -216,13 +216,26 @@ static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
         spin_unlock_irq(&ctx->spu->register_lock);
 }
 
-static void spu_hw_runcntl_stop(struct spu_context *ctx)
+static void spu_hw_master_start(struct spu_context *ctx)
 {
-        spin_lock_irq(&ctx->spu->register_lock);
-        out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
-        while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
-                cpu_relax();
-        spin_unlock_irq(&ctx->spu->register_lock);
+        struct spu *spu = ctx->spu;
+        u64 sr1;
+
+        spin_lock_irq(&spu->register_lock);
+        sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+        spu_mfc_sr1_set(spu, sr1);
+        spin_unlock_irq(&spu->register_lock);
+}
+
+static void spu_hw_master_stop(struct spu_context *ctx)
+{
+        struct spu *spu = ctx->spu;
+        u64 sr1;
+
+        spin_lock_irq(&spu->register_lock);
+        sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+        spu_mfc_sr1_set(spu, sr1);
+        spin_unlock_irq(&spu->register_lock);
 }
 
 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
@@ -295,7 +308,8 @@ struct spu_context_ops spu_hw_ops = {
         .status_read = spu_hw_status_read,
         .get_ls = spu_hw_get_ls,
         .runcntl_write = spu_hw_runcntl_write,
-        .runcntl_stop = spu_hw_runcntl_stop,
+        .master_start = spu_hw_master_start,
+        .master_stop = spu_hw_master_stop,
         .set_mfc_query = spu_hw_set_mfc_query,
         .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
         .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
...
@@ -248,8 +248,13 @@ static int spu_setup_isolated(struct spu_context *ctx)
         if (!isolated_loader)
                 return -ENODEV;
 
-        if ((ret = spu_acquire_exclusive(ctx)) != 0)
-                return ret;
+        /* prevent concurrent operation with spu_run */
+        down(&ctx->run_sema);
+        ctx->ops->master_start(ctx);
+
+        ret = spu_acquire_exclusive(ctx);
+        if (ret)
+                goto out;
 
         mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
 
@@ -315,12 +320,14 @@ static int spu_setup_isolated(struct spu_context *ctx)
 
 out_unlock:
         spu_release_exclusive(ctx);
+out:
+        ctx->ops->master_stop(ctx);
+        up(&ctx->run_sema);
         return ret;
 }
 
 int spu_recycle_isolated(struct spu_context *ctx)
 {
-        ctx->ops->runcntl_stop(ctx);
         return spu_setup_isolated(ctx);
 }
 
@@ -435,6 +442,8 @@ static int spufs_create_context(struct inode *inode,
         if (ret >= 0 && (flags & SPU_CREATE_ISOLATE)) {
                 int setup_err = spu_setup_isolated(
                                 SPUFS_I(dentry->d_inode)->i_ctx);
+                /* FIXME: clean up context again on failure to avoid
+                   leak. */
                 if (setup_err)
                         ret = setup_err;
         }
...
@@ -207,6 +207,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
         if (down_interruptible(&ctx->run_sema))
                 return -ERESTARTSYS;
 
+        ctx->ops->master_start(ctx);
         ctx->event_return = 0;
         ret = spu_run_init(ctx, npc);
         if (ret)
@@ -234,7 +235,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
         } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                       SPU_STATUS_STOPPED_BY_HALT)));
 
-        ctx->ops->runcntl_stop(ctx);
+        ctx->ops->master_stop(ctx);
         ret = spu_run_fini(ctx, npc, &status);
         spu_yield(ctx);
...
@@ -116,7 +116,8 @@ struct spu_context_ops {
         u32(*status_read) (struct spu_context * ctx);
         char*(*get_ls) (struct spu_context * ctx);
         void (*runcntl_write) (struct spu_context * ctx, u32 data);
-        void (*runcntl_stop) (struct spu_context * ctx);
+        void (*master_start) (struct spu_context * ctx);
+        void (*master_stop) (struct spu_context * ctx);
         int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
         u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
         u32 (*get_mfc_free_elements)(struct spu_context *ctx);
...