提交 a8e9f5f6 编写于 作者: A Arnd Bergmann

Merge tag 'tee-drv-async-supplicant-for-v4.16' of...

Merge tag 'tee-drv-async-supplicant-for-v4.16' of https://git.linaro.org/people/jens.wiklander/linux-tee into next/drivers

Pull "Enable async communication with tee supplicant" from Jens Wiklander:

This pull request enables asynchronous communication with TEE supplicant
by introducing meta parameters in the user space API. The meta
parameters can be used to tag requests with an id that can be matched
against an asynchronous response as is done here in the OP-TEE driver.

Asynchronous supplicant communication is needed by OP-TEE to implement
GlobalPlatform's TEE Sockets API Specification v1.0.1. The specification
is available at https://www.globalplatform.org/specificationsdevice.asp.

This change is backwards compatible allowing older supplicants to work
with newer kernels and vice versa.

* tag 'tee-drv-async-supplicant-for-v4.16' of https://git.linaro.org/people/jens.wiklander/linux-tee:
  optee: support asynchronous supplicant requests
  tee: add TEE_IOCTL_PARAM_ATTR_META
  tee: add tee_param_is_memref() for driver use
...@@ -187,12 +187,12 @@ static int optee_open(struct tee_context *ctx) ...@@ -187,12 +187,12 @@ static int optee_open(struct tee_context *ctx)
if (teedev == optee->supp_teedev) { if (teedev == optee->supp_teedev) {
bool busy = true; bool busy = true;
mutex_lock(&optee->supp.ctx_mutex); mutex_lock(&optee->supp.mutex);
if (!optee->supp.ctx) { if (!optee->supp.ctx) {
busy = false; busy = false;
optee->supp.ctx = ctx; optee->supp.ctx = ctx;
} }
mutex_unlock(&optee->supp.ctx_mutex); mutex_unlock(&optee->supp.mutex);
if (busy) { if (busy) {
kfree(ctxdata); kfree(ctxdata);
return -EBUSY; return -EBUSY;
...@@ -252,11 +252,8 @@ static void optee_release(struct tee_context *ctx) ...@@ -252,11 +252,8 @@ static void optee_release(struct tee_context *ctx)
ctx->data = NULL; ctx->data = NULL;
if (teedev == optee->supp_teedev) { if (teedev == optee->supp_teedev)
mutex_lock(&optee->supp.ctx_mutex); optee_supp_release(&optee->supp);
optee->supp.ctx = NULL;
mutex_unlock(&optee->supp.ctx_mutex);
}
} }
static const struct tee_driver_ops optee_ops = { static const struct tee_driver_ops optee_ops = {
......
...@@ -53,36 +53,24 @@ struct optee_wait_queue { ...@@ -53,36 +53,24 @@ struct optee_wait_queue {
* @ctx the context of current connected supplicant. * @ctx the context of current connected supplicant.
* if !NULL the supplicant device is available for use, * if !NULL the supplicant device is available for use,
* else busy * else busy
* @ctx_mutex: held while accessing @ctx * @mutex: held while accessing content of this struct
* @func: supplicant function id to call * @req_id: current request id if supplicant is doing synchronous
* @ret: call return value * communication, else -1
* @num_params: number of elements in @param * @reqs: queued request not yet retrieved by supplicant
* @param: parameters for @func * @idr: IDR holding all requests currently being processed
* @req_posted: if true, a request has been posted to the supplicant * by supplicant
* @supp_next_send: if true, next step is for supplicant to send response * @reqs_c: completion used by supplicant when waiting for a
* @thrd_mutex: held by the thread doing a request to supplicant * request to be queued.
* @supp_mutex: held by supplicant while operating on this struct
* @data_to_supp: supplicant is waiting on this for next request
* @data_from_supp: requesting thread is waiting on this to get the result
*/ */
struct optee_supp { struct optee_supp {
/* Serializes access to this struct */
struct mutex mutex;
struct tee_context *ctx; struct tee_context *ctx;
/* Serializes access of ctx */
struct mutex ctx_mutex; int req_id;
struct list_head reqs;
u32 func; struct idr idr;
u32 ret; struct completion reqs_c;
size_t num_params;
struct tee_param *param;
bool req_posted;
bool supp_next_send;
/* Serializes access to this struct for requesting thread */
struct mutex thrd_mutex;
/* Serializes access to this struct for supplicant threads */
struct mutex supp_mutex;
struct completion data_to_supp;
struct completion data_from_supp;
}; };
/** /**
...@@ -142,6 +130,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len); ...@@ -142,6 +130,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp); void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp); void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param); struct tee_param *param);
......
...@@ -192,10 +192,10 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) ...@@ -192,10 +192,10 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
if (ret) if (ret)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mutex_lock(&optee->supp.ctx_mutex); mutex_lock(&optee->supp.mutex);
/* Increases count as secure world doesn't have a reference */ /* Increases count as secure world doesn't have a reference */
shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
mutex_unlock(&optee->supp.ctx_mutex); mutex_unlock(&optee->supp.mutex);
return shm; return shm;
} }
......
...@@ -16,21 +16,61 @@ ...@@ -16,21 +16,61 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include "optee_private.h" #include "optee_private.h"
/*
 * struct optee_supp_req - a single request to the supplicant
 * @link:	list element while the request sits in optee_supp::reqs
 *		waiting to be retrieved by the supplicant
 * @busy:	true while the supplicant holds the request (it has been
 *		moved from the queue into optee_supp::idr)
 * @func:	supplicant function id to call
 * @ret:	return value of the call, filled in by the supplicant
 * @num_params:	number of elements in @param
 * @param:	parameters for @func
 * @c:		completed when the supplicant has answered (or the
 *		request is aborted)
 */
struct optee_supp_req {
	struct list_head link;

	bool busy;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};
void optee_supp_init(struct optee_supp *supp) void optee_supp_init(struct optee_supp *supp)
{ {
memset(supp, 0, sizeof(*supp)); memset(supp, 0, sizeof(*supp));
mutex_init(&supp->ctx_mutex); mutex_init(&supp->mutex);
mutex_init(&supp->thrd_mutex); init_completion(&supp->reqs_c);
mutex_init(&supp->supp_mutex); idr_init(&supp->idr);
init_completion(&supp->data_to_supp); INIT_LIST_HEAD(&supp->reqs);
init_completion(&supp->data_from_supp); supp->req_id = -1;
} }
void optee_supp_uninit(struct optee_supp *supp) void optee_supp_uninit(struct optee_supp *supp)
{ {
mutex_destroy(&supp->ctx_mutex); mutex_destroy(&supp->mutex);
mutex_destroy(&supp->thrd_mutex); idr_destroy(&supp->idr);
mutex_destroy(&supp->supp_mutex); }
/*
 * optee_supp_release() - abort all requests when the supplicant goes away
 * @supp:	supplicant state
 *
 * Called from optee_release() when the supplicant's tee_context is
 * released. Completes every outstanding request — both those already
 * retrieved by the supplicant (in @supp->idr) and those still queued
 * (on @supp->reqs) — with TEEC_ERROR_COMMUNICATION, then clears
 * @supp->ctx and @supp->req_id so a new supplicant can connect.
 */
void optee_supp_release(struct optee_supp *supp)
{
	int id;
	struct optee_supp_req *req;
	struct optee_supp_req *req_tmp;

	mutex_lock(&supp->mutex);

	/* Abort all requests retrieved by supplicant */
	idr_for_each_entry(&supp->idr, req, id) {
		req->busy = false;
		idr_remove(&supp->idr, id);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Abort all queued requests */
	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
		list_del(&req->link);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Mark the supplicant slot free and back in synchronous mode */
	supp->ctx = NULL;
	supp->req_id = -1;
	mutex_unlock(&supp->mutex);
} }
/** /**
...@@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp) ...@@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp)
*/ */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param) struct tee_param *param)
{ {
bool interruptable;
struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp; struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
bool interruptable;
u32 ret; u32 ret;
/* if (!req)
* Other threads blocks here until we've copied our answer from return TEEC_ERROR_OUT_OF_MEMORY;
* supplicant.
*/
while (mutex_lock_interruptible(&supp->thrd_mutex)) {
/* See comment below on when the RPC can be interrupted. */
mutex_lock(&supp->ctx_mutex);
interruptable = !supp->ctx;
mutex_unlock(&supp->ctx_mutex);
if (interruptable)
return TEEC_ERROR_COMMUNICATION;
}
/* init_completion(&req->c);
* We have exclusive access now since the supplicant at this req->func = func;
* point is either doing a req->num_params = num_params;
* wait_for_completion_interruptible(&supp->data_to_supp) or is in req->param = param;
* userspace still about to do the ioctl() to enter
* optee_supp_recv() below.
*/
supp->func = func; /* Insert the request in the request list */
supp->num_params = num_params; mutex_lock(&supp->mutex);
supp->param = param; list_add_tail(&req->link, &supp->reqs);
supp->req_posted = true; mutex_unlock(&supp->mutex);
/* Let supplicant get the data */ /* Tell an eventual waiter there's a new request */
complete(&supp->data_to_supp); complete(&supp->reqs_c);
/* /*
* Wait for supplicant to process and return result, once we've * Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(data_from_supp) we have * returned from wait_for_completion(&req->c) successfully we have
* exclusive access again. * exclusive access again.
*/ */
while (wait_for_completion_interruptible(&supp->data_from_supp)) { while (wait_for_completion_interruptible(&req->c)) {
mutex_lock(&supp->ctx_mutex); mutex_lock(&supp->mutex);
interruptable = !supp->ctx; interruptable = !supp->ctx;
if (interruptable) { if (interruptable) {
/* /*
* There's no supplicant available and since the * There's no supplicant available and since the
* supp->ctx_mutex currently is held none can * supp->mutex currently is held none can
* become available until the mutex released * become available until the mutex released
* again. * again.
* *
...@@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, ...@@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and * will serve all requests in a timely manner and
* interrupting then wouldn't make sense. * interrupting then wouldn't make sense.
*/ */
supp->ret = TEEC_ERROR_COMMUNICATION; interruptable = !req->busy;
init_completion(&supp->data_to_supp); if (!req->busy)
list_del(&req->link);
} }
mutex_unlock(&supp->ctx_mutex); mutex_unlock(&supp->mutex);
if (interruptable)
if (interruptable) {
req->ret = TEEC_ERROR_COMMUNICATION;
break; break;
}
} }
ret = supp->ret; ret = req->ret;
supp->param = NULL; kfree(req);
supp->req_posted = false;
/* We're done, let someone else talk to the supplicant now. */
mutex_unlock(&supp->thrd_mutex);
return ret; return ret;
} }
/*
 * supp_pop_entry() - pop the oldest queued request and hand it to the
 * supplicant
 * @supp:	supplicant state
 * @num_params:	number of parameter slots the supplicant has room for
 * @id:		updated with the IDR id assigned to the popped request
 *
 * Must be called with @supp->mutex held (see optee_supp_recv()).
 *
 * Returns the request (moved from @supp->reqs into @supp->idr and
 * marked busy), NULL if the queue is empty, or an ERR_PTR on failure.
 */
static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&supp->reqs))
		return NULL;

	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	/* Make the request findable by id so optee_supp_send() can match it */
	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->busy = true;

	return req;
}
/*
 * supp_check_recv_params() - validate the parameter list the supplicant
 * supplies when asking for a new request
 * @num_params:	number of elements in @params
 * @params:	parameter list to validate
 * @num_meta:	updated with the number of leading meta parameters (0 or 1)
 *
 * Returns 0 on success or -EINVAL if the parameter list is unusable.
 */
static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t i;

	if (!num_params)
		return -EINVAL;

	/*
	 * Drop the references taken earlier on any memref parameters;
	 * such parameters are refused below anyway.
	 */
	for (i = 0; i < num_params; i++) {
		if (tee_param_is_memref(params + i) && params[i].u.memref.shm)
			tee_shm_put(params[i].u.memref.shm);
	}

	/*
	 * Only TEE_IOCTL_PARAM_ATTR_TYPE_NONE parameters are accepted
	 * here, optionally with the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (i = 0; i < num_params; i++) {
		if (params[i].attr &&
		    params[i].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;
	}

	/* At most one meta parameter is ever needed, so only check the first */
	*num_meta = params->attr == TEE_IOCTL_PARAM_ATTR_META ? 1 : 0;

	return 0;
}
/** /**
* optee_supp_recv() - receive request for supplicant * optee_supp_recv() - receive request for supplicant
* @ctx: context receiving the request * @ctx: context receiving the request
...@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, ...@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_device *teedev = ctx->teedev; struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev); struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp; struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req = NULL;
int id;
size_t num_meta;
int rc; int rc;
/* rc = supp_check_recv_params(*num_params, param, &num_meta);
* In case two threads in one supplicant is calling this function if (rc)
* simultaneously we need to protect the data with a mutex which return rc;
* we'll release before returning.
*/ while (true) {
mutex_lock(&supp->supp_mutex); mutex_lock(&supp->mutex);
req = supp_pop_entry(supp, *num_params - num_meta, &id);
mutex_unlock(&supp->mutex);
if (req) {
if (IS_ERR(req))
return PTR_ERR(req);
break;
}
if (supp->supp_next_send) {
/* /*
* optee_supp_recv() has been called again without * If we didn't get a request we'll block in
* a optee_supp_send() in between. Supplicant has * wait_for_completion() to avoid needless spinning.
* probably been restarted before it was able to *
* write back last result. Abort last request and * This is where supplicant will be hanging most of
* wait for a new. * the time, let's make this interruptable so we
* can easily restart supplicant if needed.
*/ */
if (supp->req_posted) { if (wait_for_completion_interruptible(&supp->reqs_c))
supp->ret = TEEC_ERROR_COMMUNICATION; return -ERESTARTSYS;
supp->supp_next_send = false;
complete(&supp->data_from_supp);
}
} }
/* if (num_meta) {
* This is where supplicant will be hanging most of the /*
* time, let's make this interruptable so we can easily * tee-supplicant support meta parameters -> requsts can be
* restart supplicant if needed. * processed asynchronously.
*/ */
if (wait_for_completion_interruptible(&supp->data_to_supp)) { param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
rc = -ERESTARTSYS; TEE_IOCTL_PARAM_ATTR_META;
goto out; param->u.value.a = id;
param->u.value.b = 0;
param->u.value.c = 0;
} else {
mutex_lock(&supp->mutex);
supp->req_id = id;
mutex_unlock(&supp->mutex);
} }
/* We have exlusive access to the data */ *func = req->func;
*num_params = req->num_params + num_meta;
memcpy(param + num_meta, req->param,
sizeof(struct tee_param) * req->num_params);
if (*num_params < supp->num_params) { return 0;
/* }
* Not enough room for parameters, tell supplicant
* it failed and abort last request. static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
*/ size_t num_params,
supp->ret = TEEC_ERROR_COMMUNICATION; struct tee_param *param,
rc = -EINVAL; size_t *num_meta)
complete(&supp->data_from_supp); {
goto out; struct optee_supp_req *req;
int id;
size_t nm;
const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
TEE_IOCTL_PARAM_ATTR_META;
if (!num_params)
return ERR_PTR(-EINVAL);
if (supp->req_id == -1) {
if (param->attr != attr)
return ERR_PTR(-EINVAL);
id = param->u.value.a;
nm = 1;
} else {
id = supp->req_id;
nm = 0;
} }
*func = supp->func; req = idr_find(&supp->idr, id);
*num_params = supp->num_params; if (!req)
memcpy(param, supp->param, return ERR_PTR(-ENOENT);
sizeof(struct tee_param) * supp->num_params);
/* Allow optee_supp_send() below to do its work */ if ((num_params - nm) != req->num_params)
supp->supp_next_send = true; return ERR_PTR(-EINVAL);
rc = 0; req->busy = false;
out: idr_remove(&supp->idr, id);
mutex_unlock(&supp->supp_mutex); supp->req_id = -1;
return rc; *num_meta = nm;
return req;
} }
/** /**
...@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params, ...@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_device *teedev = ctx->teedev; struct tee_device *teedev = ctx->teedev;
struct optee *optee = tee_get_drvdata(teedev); struct optee *optee = tee_get_drvdata(teedev);
struct optee_supp *supp = &optee->supp; struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
size_t n; size_t n;
int rc = 0; size_t num_meta;
/* mutex_lock(&supp->mutex);
* We still have exclusive access to the data since that's how we req = supp_pop_req(supp, num_params, param, &num_meta);
* left it when returning from optee_supp_read(). mutex_unlock(&supp->mutex);
*/
/* See comment on mutex in optee_supp_read() above */
mutex_lock(&supp->supp_mutex);
if (!supp->supp_next_send) {
/*
* Something strange is going on, supplicant shouldn't
* enter optee_supp_send() in this state
*/
rc = -ENOENT;
goto out;
}
if (num_params != supp->num_params) { if (IS_ERR(req)) {
/* /* Something is wrong, let supplicant restart. */
* Something is wrong, let supplicant restart. Next call to return PTR_ERR(req);
* optee_supp_recv() will give an error to the requesting
* thread and release it.
*/
rc = -EINVAL;
goto out;
} }
/* Update out and in/out parameters */ /* Update out and in/out parameters */
for (n = 0; n < num_params; n++) { for (n = 0; n < req->num_params; n++) {
struct tee_param *p = supp->param + n; struct tee_param *p = req->param + n;
switch (p->attr) { switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
p->u.value.a = param[n].u.value.a; p->u.value.a = param[n + num_meta].u.value.a;
p->u.value.b = param[n].u.value.b; p->u.value.b = param[n + num_meta].u.value.b;
p->u.value.c = param[n].u.value.c; p->u.value.c = param[n + num_meta].u.value.c;
break; break;
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
p->u.memref.size = param[n].u.memref.size; p->u.memref.size = param[n + num_meta].u.memref.size;
break; break;
default: default:
break; break;
} }
} }
supp->ret = ret; req->ret = ret;
/* Allow optee_supp_recv() above to do its work */
supp->supp_next_send = false;
/* Let the requesting thread continue */ /* Let the requesting thread continue */
complete(&supp->data_from_supp); complete(&req->c);
out:
mutex_unlock(&supp->supp_mutex); return 0;
return rc;
} }
...@@ -152,11 +152,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, ...@@ -152,11 +152,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
return -EFAULT; return -EFAULT;
/* All unused attribute bits has to be zero */ /* All unused attribute bits has to be zero */
if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL; return -EINVAL;
params[n].attr = ip.attr; params[n].attr = ip.attr;
switch (ip.attr) { switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
break; break;
...@@ -221,18 +221,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams, ...@@ -221,18 +221,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
return 0; return 0;
} }
static bool param_is_memref(struct tee_param *param)
{
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
return true;
default:
return false;
}
}
static int tee_ioctl_open_session(struct tee_context *ctx, static int tee_ioctl_open_session(struct tee_context *ctx,
struct tee_ioctl_buf_data __user *ubuf) struct tee_ioctl_buf_data __user *ubuf)
{ {
...@@ -296,7 +284,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx, ...@@ -296,7 +284,7 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
if (params) { if (params) {
/* Decrease ref count for all valid shared memory pointers */ /* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++) for (n = 0; n < arg.num_params; n++)
if (param_is_memref(params + n) && if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm) params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm); tee_shm_put(params[n].u.memref.shm);
kfree(params); kfree(params);
...@@ -358,7 +346,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx, ...@@ -358,7 +346,7 @@ static int tee_ioctl_invoke(struct tee_context *ctx,
if (params) { if (params) {
/* Decrease ref count for all valid shared memory pointers */ /* Decrease ref count for all valid shared memory pointers */
for (n = 0; n < arg.num_params; n++) for (n = 0; n < arg.num_params; n++)
if (param_is_memref(params + n) && if (tee_param_is_memref(params + n) &&
params[n].u.memref.shm) params[n].u.memref.shm)
tee_shm_put(params[n].u.memref.shm); tee_shm_put(params[n].u.memref.shm);
kfree(params); kfree(params);
...@@ -406,8 +394,8 @@ static int params_to_supp(struct tee_context *ctx, ...@@ -406,8 +394,8 @@ static int params_to_supp(struct tee_context *ctx,
struct tee_ioctl_param ip; struct tee_ioctl_param ip;
struct tee_param *p = params + n; struct tee_param *p = params + n;
ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; ip.attr = p->attr;
switch (p->attr) { switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
ip.a = p->u.value.a; ip.a = p->u.value.a;
...@@ -471,6 +459,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx, ...@@ -471,6 +459,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
if (!params) if (!params)
return -ENOMEM; return -ENOMEM;
rc = params_from_user(ctx, params, num_params, uarg->params);
if (rc)
goto out;
rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
if (rc) if (rc)
goto out; goto out;
...@@ -500,11 +492,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params, ...@@ -500,11 +492,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
return -EFAULT; return -EFAULT;
/* All unused attribute bits has to be zero */ /* All unused attribute bits has to be zero */
if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
return -EINVAL; return -EINVAL;
p->attr = ip.attr; p->attr = ip.attr;
switch (ip.attr) { switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
/* Only out and in/out values can be updated */ /* Only out and in/out values can be updated */
......
...@@ -275,4 +275,16 @@ int tee_shm_get_id(struct tee_shm *shm); ...@@ -275,4 +275,16 @@ int tee_shm_get_id(struct tee_shm *shm);
*/ */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
static inline bool tee_param_is_memref(struct tee_param *param)
{
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
return true;
default:
return false;
}
}
#endif /*__TEE_DRV_H*/ #endif /*__TEE_DRV_H*/
...@@ -154,6 +154,13 @@ struct tee_ioctl_buf_data { ...@@ -154,6 +154,13 @@ struct tee_ioctl_buf_data {
*/ */
#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
/* Meta parameter carrying extra information about the message. */
#define TEE_IOCTL_PARAM_ATTR_META 0x100
/* Mask of all known attr bits */
#define TEE_IOCTL_PARAM_ATTR_MASK \
(TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META)
/* /*
* Matches TEEC_LOGIN_* in GP TEE Client API * Matches TEEC_LOGIN_* in GP TEE Client API
* Are only defined for GP compliant TEEs * Are only defined for GP compliant TEEs
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册