Commit 9196dc11 authored by Mark A. Allyn, committed by Greg Kroah-Hartman

staging: sep: reworked crypto layer

This gets the SEP crypto layer up and running with things like dmcrypt.
It's a fairly big set of changes because it has to rework the whole context
handling system.

[This is picked out of the differences between the upstream driver and
 the staging driver. I'm resolving the differences as a series of updates -AC]
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent ab8ef351
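For orientation, here is a minimal, hypothetical sketch of the split this rework introduces, assuming cut-down struct definitions (the field names below all appear in the diff, but the driver's real structures carry many more members): long-lived key and cipher-context state stays in the per-tfm sep_system_ctx, while everything tied to a single request moves into a per-request this_task_ctx kept in the crypto request context.

    /* Hypothetical, cut-down sketch -- not the driver's actual definitions */
    struct sep_system_ctx {                  /* per-tfm: survives across requests */
            int key_sent;                    /* has the key already been loaded into the SEP? */
            u32 keylen;
            struct sep_des_private_context des_private_ctx;  /* vendor DES context */
            struct sep_aes_private_context aes_private_ctx;  /* vendor AES context */
    };

    struct this_task_ctx {                   /* per-request: lives in the request ctx area */
            struct sep_device *sep_used;     /* device handling this job */
            void *msg;                       /* local copy of the SEP message area */
            struct ablkcipher_walk walk;     /* kernel crypto walk; source of the IV */
            struct scatterlist *src_sg, *dst_sg;
            u32 init_opcode, block_opcode;   /* which SEP operations to issue */
            int i_own_sep;                   /* this task currently holds the device */
            int *are_we_done_yet;            /* completion flag polled by the work item */
    };

Most of the mechanical churn in the diff below is the helper functions switching from taking a sep_system_ctx to taking a this_task_ctx, plus the split of sep_crypto_block() into separate key-send and data-block preparation steps.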
@@ -88,17 +88,17 @@ static void sep_dequeuer(void *data);
* This will only print dump if DEBUG is set; it does * This will only print dump if DEBUG is set; it does
* follow kernel debug print enabling * follow kernel debug print enabling
*/ */
static void crypto_sep_dump_message(struct sep_system_ctx *sctx) static void crypto_sep_dump_message(struct sep_device *sep, void *msg)
{ {
#if 0 #if 0
u32 *p; u32 *p;
u32 *i; u32 *i;
int count; int count;
p = sctx->sep_used->shared_addr; p = sep->shared_addr;
i = (u32 *)sctx->msg; i = (u32 *)msg;
for (count = 0; count < 40 * 4; count += 4) for (count = 0; count < 10 * 4; count += 4)
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&sep->pdev->dev,
"[PID%d] Word %d of the message is %x (local)%x\n", "[PID%d] Word %d of the message is %x (local)%x\n",
current->pid, count/4, *p++, *i++); current->pid, count/4, *p++, *i++);
#endif #endif
@@ -534,6 +534,67 @@ static void sep_dump_sg(struct sep_device *sep, char *stg,
#endif #endif
} }
/* Debug - prints only if DEBUG is defined */
static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
{
unsigned char *cptr;
struct sep_aes_internal_context *aes_internal;
struct sep_des_internal_context *des_internal;
int ct1;
struct this_task_ctx *ta_ctx;
struct crypto_ablkcipher *tfm;
struct sep_system_ctx *sctx;
ta_ctx = ablkcipher_request_ctx(req);
tfm = crypto_ablkcipher_reqtfm(req);
sctx = crypto_ablkcipher_ctx(tfm);
dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
if ((ta_ctx->current_request == DES_CBC) &&
(ta_ctx->des_opmode == SEP_DES_CBC)) {
des_internal = (struct sep_des_internal_context *)
sctx->des_private_ctx.ctx_buf;
/* print vendor */
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep - vendor iv for DES\n");
cptr = (unsigned char *)des_internal->iv_context;
for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"%02x\n", *(cptr + ct1));
/* print walk */
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep - walk from kernel crypto iv for DES\n");
cptr = (unsigned char *)ta_ctx->walk.iv;
for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"%02x\n", *(cptr + ct1));
} else if ((ta_ctx->current_request == AES_CBC) &&
(ta_ctx->aes_opmode == SEP_AES_CBC)) {
aes_internal = (struct sep_aes_internal_context *)
sctx->aes_private_ctx.cbuff;
/* print vendor */
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep - vendor iv for AES\n");
cptr = (unsigned char *)aes_internal->aes_ctx_iv;
for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"%02x\n", *(cptr + ct1));
/* print walk */
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep - walk from kernel crypto iv for AES\n");
cptr = (unsigned char *)ta_ctx->walk.iv;
for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"%02x\n", *(cptr + ct1));
}
}
/** /**
* RFC2451: Weak key check * RFC2451: Weak key check
* Returns: 1 (weak), 0 (not weak) * Returns: 1 (weak), 0 (not weak)
@@ -671,61 +732,61 @@ static u32 sep_sg_nents(struct scatterlist *sg)
/** /**
* sep_start_msg - * sep_start_msg -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @returns: offset to place for the next word in the message * @returns: offset to place for the next word in the message
* Set up pointer in message pool for new message * Set up pointer in message pool for new message
*/ */
static u32 sep_start_msg(struct sep_system_ctx *sctx) static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
{ {
u32 *word_ptr; u32 *word_ptr;
sctx->msg_len_words = 2; ta_ctx->msg_len_words = 2;
sctx->msgptr = sctx->msg; ta_ctx->msgptr = ta_ctx->msg;
memset(sctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
sctx->msgptr += sizeof(u32) * 2; ta_ctx->msgptr += sizeof(u32) * 2;
word_ptr = (u32 *)sctx->msgptr; word_ptr = (u32 *)ta_ctx->msgptr;
*word_ptr = SEP_START_MSG_TOKEN; *word_ptr = SEP_START_MSG_TOKEN;
return sizeof(u32) * 2; return sizeof(u32) * 2;
} }
/** /**
* sep_end_msg - * sep_end_msg -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @messages_offset: current message offset * @messages_offset: current message offset
* Returns: 0 for success; <0 otherwise * Returns: 0 for success; <0 otherwise
* End message; set length and CRC; and * End message; set length and CRC; and
* send interrupt to the SEP * send interrupt to the SEP
*/ */
static void sep_end_msg(struct sep_system_ctx *sctx, u32 msg_offset) static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
{ {
u32 *word_ptr; u32 *word_ptr;
/* Msg size goes into msg after token */ /* Msg size goes into msg after token */
sctx->msg_len_words = msg_offset / sizeof(u32) + 1; ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
word_ptr = (u32 *)sctx->msgptr; word_ptr = (u32 *)ta_ctx->msgptr;
word_ptr += 1; word_ptr += 1;
*word_ptr = sctx->msg_len_words; *word_ptr = ta_ctx->msg_len_words;
/* CRC (currently 0) goes at end of msg */ /* CRC (currently 0) goes at end of msg */
word_ptr = (u32 *)(sctx->msgptr + msg_offset); word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
*word_ptr = 0; *word_ptr = 0;
} }
/** /**
* sep_start_inbound_msg - * sep_start_inbound_msg -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @msg_offset: offset to place for the next word in the message * @msg_offset: offset to place for the next word in the message
* @returns: 0 for success; error value for failure * @returns: 0 for success; error value for failure
* Set up pointer in message pool for inbound message * Set up pointer in message pool for inbound message
*/ */
static u32 sep_start_inbound_msg(struct sep_system_ctx *sctx, u32 *msg_offset) static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
{ {
u32 *word_ptr; u32 *word_ptr;
u32 token; u32 token;
u32 error = SEP_OK; u32 error = SEP_OK;
*msg_offset = sizeof(u32) * 2; *msg_offset = sizeof(u32) * 2;
word_ptr = (u32 *)sctx->msgptr; word_ptr = (u32 *)ta_ctx->msgptr;
token = *word_ptr; token = *word_ptr;
sctx->msg_len_words = *(word_ptr + 1); ta_ctx->msg_len_words = *(word_ptr + 1);
if (token != SEP_START_MSG_TOKEN) { if (token != SEP_START_MSG_TOKEN) {
error = SEP_INVALID_START; error = SEP_INVALID_START;
@@ -739,7 +800,7 @@ static u32 sep_start_inbound_msg(struct sep_system_ctx *sctx, u32 *msg_offset)
/** /**
* sep_write_msg - * sep_write_msg -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @in_addr: pointer to start of parameter * @in_addr: pointer to start of parameter
* @size: size of parameter to copy (in bytes) * @size: size of parameter to copy (in bytes)
* @max_size: size to move up offset; SEP mesg is in word sizes * @max_size: size to move up offset; SEP mesg is in word sizes
@@ -747,12 +808,12 @@ static u32 sep_start_inbound_msg(struct sep_system_ctx *sctx, u32 *msg_offset)
* @byte_array: flag ti indicate wheter endian must be changed * @byte_array: flag ti indicate wheter endian must be changed
* Copies data into the message area from caller * Copies data into the message area from caller
*/ */
static void sep_write_msg(struct sep_system_ctx *sctx, void *in_addr, static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
u32 size, u32 max_size, u32 *msg_offset, u32 byte_array) u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{ {
u32 *word_ptr; u32 *word_ptr;
void *void_ptr; void *void_ptr;
void_ptr = sctx->msgptr + *msg_offset; void_ptr = ta_ctx->msgptr + *msg_offset;
word_ptr = (u32 *)void_ptr; word_ptr = (u32 *)void_ptr;
memcpy(void_ptr, in_addr, size); memcpy(void_ptr, in_addr, size);
*msg_offset += max_size; *msg_offset += max_size;
@@ -767,18 +828,18 @@ static void sep_write_msg(struct sep_system_ctx *sctx, void *in_addr,
/** /**
* sep_make_header * sep_make_header
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @msg_offset: pointer to current offset (is updated) * @msg_offset: pointer to current offset (is updated)
* @op_code: op code to put into message * @op_code: op code to put into message
* Puts op code into message and updates offset * Puts op code into message and updates offset
*/ */
static void sep_make_header(struct sep_system_ctx *sctx, u32 *msg_offset, static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
u32 op_code) u32 op_code)
{ {
u32 *word_ptr; u32 *word_ptr;
*msg_offset = sep_start_msg(sctx); *msg_offset = sep_start_msg(ta_ctx);
word_ptr = (u32 *)(sctx->msgptr + *msg_offset); word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
*word_ptr = op_code; *word_ptr = op_code;
*msg_offset += sizeof(u32); *msg_offset += sizeof(u32);
} }
@@ -787,7 +848,7 @@ static void sep_make_header(struct sep_system_ctx *sctx, u32 *msg_offset,
/** /**
* sep_read_msg - * sep_read_msg -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @in_addr: pointer to start of parameter * @in_addr: pointer to start of parameter
* @size: size of parameter to copy (in bytes) * @size: size of parameter to copy (in bytes)
* @max_size: size to move up offset; SEP mesg is in word sizes * @max_size: size to move up offset; SEP mesg is in word sizes
@@ -795,12 +856,12 @@ static void sep_make_header(struct sep_system_ctx *sctx, u32 *msg_offset,
* @byte_array: flag ti indicate wheter endian must be changed * @byte_array: flag ti indicate wheter endian must be changed
* Copies data out of the message area to caller * Copies data out of the message area to caller
*/ */
static void sep_read_msg(struct sep_system_ctx *sctx, void *in_addr, static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
u32 size, u32 max_size, u32 *msg_offset, u32 byte_array) u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
{ {
u32 *word_ptr; u32 *word_ptr;
void *void_ptr; void *void_ptr;
void_ptr = sctx->msgptr + *msg_offset; void_ptr = ta_ctx->msgptr + *msg_offset;
word_ptr = (u32 *)void_ptr; word_ptr = (u32 *)void_ptr;
/* Do we need to manipulate endian? */ /* Do we need to manipulate endian? */
@@ -816,28 +877,28 @@ static void sep_read_msg(struct sep_system_ctx *sctx, void *in_addr,
/** /**
* sep_verify_op - * sep_verify_op -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @op_code: expected op_code * @op_code: expected op_code
* @msg_offset: pointer to current offset (is updated) * @msg_offset: pointer to current offset (is updated)
* @returns: 0 for success; error for failure * @returns: 0 for success; error for failure
*/ */
static u32 sep_verify_op(struct sep_system_ctx *sctx, u32 op_code, static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
u32 *msg_offset) u32 *msg_offset)
{ {
u32 error; u32 error;
u32 in_ary[2]; u32 in_ary[2];
struct sep_device *sep = sctx->sep_used; struct sep_device *sep = ta_ctx->sep_used;
dev_dbg(&sep->pdev->dev, "dumping return message\n"); dev_dbg(&sep->pdev->dev, "dumping return message\n");
error = sep_start_inbound_msg(sctx, msg_offset); error = sep_start_inbound_msg(ta_ctx, msg_offset);
if (error) { if (error) {
dev_warn(&sep->pdev->dev, dev_warn(&sep->pdev->dev,
"sep_start_inbound_msg error\n"); "sep_start_inbound_msg error\n");
return error; return error;
} }
sep_read_msg(sctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2, sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
msg_offset, 0); msg_offset, 0);
if (in_ary[0] != op_code) { if (in_ary[0] != op_code) {
@@ -863,7 +924,7 @@ return 0;
/** /**
* sep_read_context - * sep_read_context -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @msg_offset: point to current place in SEP msg; is updated * @msg_offset: point to current place in SEP msg; is updated
* @dst: pointer to place to put the context * @dst: pointer to place to put the context
* @len: size of the context structure (differs for crypro/hash) * @len: size of the context structure (differs for crypro/hash)
@@ -873,16 +934,16 @@ return 0;
* it skips over some words in the msg area depending on the size * it skips over some words in the msg area depending on the size
* of the context * of the context
*/ */
static void sep_read_context(struct sep_system_ctx *sctx, u32 *msg_offset, static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
void *dst, u32 len) void *dst, u32 len)
{ {
u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32); u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
sep_read_msg(sctx, dst, len, max_length, msg_offset, 0); sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
} }
/** /**
* sep_write_context - * sep_write_context -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* @msg_offset: point to current place in SEP msg; is updated * @msg_offset: point to current place in SEP msg; is updated
* @src: pointer to the current context * @src: pointer to the current context
* @len: size of the context structure (differs for crypro/hash) * @len: size of the context structure (differs for crypro/hash)
@@ -892,76 +953,77 @@ static void sep_read_context(struct sep_system_ctx *sctx, u32 *msg_offset,
* it skips over some words in the msg area depending on the size * it skips over some words in the msg area depending on the size
* of the context * of the context
*/ */
static void sep_write_context(struct sep_system_ctx *sctx, u32 *msg_offset, static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
void *src, u32 len) void *src, u32 len)
{ {
u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32); u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
sep_write_msg(sctx, src, len, max_length, msg_offset, 0); sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
} }
/** /**
* sep_clear_out - * sep_clear_out -
* @sctx: pointer to struct sep_system_ctx * @ta_ctx: pointer to struct this_task_ctx
* Clear out crypto related values in sep device structure * Clear out crypto related values in sep device structure
* to enable device to be used by anyone; either kernel * to enable device to be used by anyone; either kernel
* crypto or userspace app via middleware * crypto or userspace app via middleware
*/ */
static void sep_clear_out(struct sep_system_ctx *sctx) static void sep_clear_out(struct this_task_ctx *ta_ctx)
{ {
if (sctx->src_sg_hold) { if (ta_ctx->src_sg_hold) {
sep_free_sg_buf(sctx->src_sg_hold); sep_free_sg_buf(ta_ctx->src_sg_hold);
sctx->src_sg_hold = NULL; ta_ctx->src_sg_hold = NULL;
} }
if (sctx->dst_sg_hold) { if (ta_ctx->dst_sg_hold) {
sep_free_sg_buf(sctx->dst_sg_hold); sep_free_sg_buf(ta_ctx->dst_sg_hold);
sctx->dst_sg_hold = NULL; ta_ctx->dst_sg_hold = NULL;
} }
sctx->src_sg = NULL; ta_ctx->src_sg = NULL;
sctx->dst_sg = NULL; ta_ctx->dst_sg = NULL;
sep_free_dma_table_data_handler(sctx->sep_used, &sctx->dma_ctx); sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
if (sctx->i_own_sep) { if (ta_ctx->i_own_sep) {
/** /**
* The following unlocks the sep and makes it available * The following unlocks the sep and makes it available
* to any other application * to any other application
* First, null out crypto entries in sep before relesing it * First, null out crypto entries in sep before relesing it
*/ */
sctx->sep_used->current_hash_req = NULL; ta_ctx->sep_used->current_hash_req = NULL;
sctx->sep_used->current_cypher_req = NULL; ta_ctx->sep_used->current_cypher_req = NULL;
sctx->sep_used->current_request = 0; ta_ctx->sep_used->current_request = 0;
sctx->sep_used->current_hash_stage = 0; ta_ctx->sep_used->current_hash_stage = 0;
sctx->sep_used->sctx = NULL; ta_ctx->sep_used->ta_ctx = NULL;
sctx->sep_used->in_kernel = 0; ta_ctx->sep_used->in_kernel = 0;
sctx->call_status.status = 0; ta_ctx->call_status.status = 0;
/* Remove anything confidentail */ /* Remove anything confidentail */
memset(sctx->sep_used->shared_addr, 0, memset(ta_ctx->sep_used->shared_addr, 0,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
sep_queue_status_remove(sctx->sep_used, &sctx->queue_elem); sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
#ifdef SEP_ENABLE_RUNTIME_PM #ifdef SEP_ENABLE_RUNTIME_PM
sctx->sep_used->in_use = 0; ta_ctx->sep_used->in_use = 0;
pm_runtime_mark_last_busy(&sctx->sep_used->pdev->dev); pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
pm_runtime_put_autosuspend(&sctx->sep_used->pdev->dev); pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
#endif #endif
clear_bit(SEP_WORKING_LOCK_BIT, &sctx->sep_used->in_use_flags); clear_bit(SEP_WORKING_LOCK_BIT,
sctx->sep_used->pid_doing_transaction = 0; &ta_ctx->sep_used->in_use_flags);
ta_ctx->sep_used->pid_doing_transaction = 0;
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"[PID%d] waking up next transaction\n", "[PID%d] waking up next transaction\n",
current->pid); current->pid);
clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
&sctx->sep_used->in_use_flags); &ta_ctx->sep_used->in_use_flags);
wake_up(&sctx->sep_used->event_transactions); wake_up(&ta_ctx->sep_used->event_transactions);
sctx->i_own_sep = 0; ta_ctx->i_own_sep = 0;
} }
} }
@@ -969,22 +1031,34 @@ static void sep_clear_out(struct sep_system_ctx *sctx)
* Release crypto infrastructure from EINPROGRESS and * Release crypto infrastructure from EINPROGRESS and
* clear sep_dev so that SEP is available to anyone * clear sep_dev so that SEP is available to anyone
*/ */
static void sep_crypto_release(struct sep_system_ctx *sctx, u32 error) static void sep_crypto_release(struct sep_system_ctx *sctx,
struct this_task_ctx *ta_ctx, u32 error)
{ {
struct ahash_request *hash_req = sctx->current_hash_req; struct ahash_request *hash_req = ta_ctx->current_hash_req;
struct ablkcipher_request *cypher_req = struct ablkcipher_request *cypher_req =
sctx->current_cypher_req; ta_ctx->current_cypher_req;
struct sep_device *sep = sctx->sep_used; struct sep_device *sep = ta_ctx->sep_used;
sep_clear_out(ta_ctx);
sep_clear_out(sctx); /**
* This may not yet exist depending when we
* chose to bail out. If it does exist, set
* it to 1
*/
if (ta_ctx->are_we_done_yet != NULL)
*ta_ctx->are_we_done_yet = 1;
if (cypher_req != NULL) { if (cypher_req != NULL) {
if (cypher_req->base.complete == NULL) { if ((sctx->key_sent == 1) ||
dev_dbg(&sep->pdev->dev, ((error != 0) && (error != -EINPROGRESS))) {
"release is null for cypher!"); if (cypher_req->base.complete == NULL) {
} else { dev_dbg(&sep->pdev->dev,
cypher_req->base.complete( "release is null for cypher!");
&cypher_req->base, error); } else {
cypher_req->base.complete(
&cypher_req->base, error);
}
} }
} }
@@ -1005,20 +1079,20 @@ static void sep_crypto_release(struct sep_system_ctx *sctx, u32 error)
* and it will return 0 if sep is now ours; error value if there * and it will return 0 if sep is now ours; error value if there
* were problems * were problems
*/ */
static int sep_crypto_take_sep(struct sep_system_ctx *sctx) static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
{ {
struct sep_device *sep = sctx->sep_used; struct sep_device *sep = ta_ctx->sep_used;
int result; int result;
struct sep_msgarea_hdr *my_msg_header; struct sep_msgarea_hdr *my_msg_header;
my_msg_header = (struct sep_msgarea_hdr *)sctx->msg; my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
/* add to status queue */ /* add to status queue */
sctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode, ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
sctx->nbytes, current->pid, ta_ctx->nbytes, current->pid,
current->comm, sizeof(current->comm)); current->comm, sizeof(current->comm));
if (!sctx->queue_elem) { if (!ta_ctx->queue_elem) {
dev_dbg(&sep->pdev->dev, "[PID%d] updating queue" dev_dbg(&sep->pdev->dev, "[PID%d] updating queue"
" status error\n", current->pid); " status error\n", current->pid);
return -EINVAL; return -EINVAL;
@@ -1033,48 +1107,61 @@ static int sep_crypto_take_sep(struct sep_system_ctx *sctx)
pm_runtime_get_sync(&sep_dev->pdev->dev); pm_runtime_get_sync(&sep_dev->pdev->dev);
/* Copy in the message */ /* Copy in the message */
memcpy(sep->shared_addr, sctx->msg, memcpy(sep->shared_addr, ta_ctx->msg,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
/* Copy in the dcb information if there is any */ /* Copy in the dcb information if there is any */
if (sctx->dcb_region) { if (ta_ctx->dcb_region) {
result = sep_activate_dcb_dmatables_context(sep, result = sep_activate_dcb_dmatables_context(sep,
&sctx->dcb_region, &sctx->dmatables_region, &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
sctx->dma_ctx); ta_ctx->dma_ctx);
if (result) if (result)
return result; return result;
} }
/* Mark the device so we know how to finish the job in the tasklet */ /* Mark the device so we know how to finish the job in the tasklet */
if (sctx->current_hash_req) if (ta_ctx->current_hash_req)
sep->current_hash_req = sctx->current_hash_req; sep->current_hash_req = ta_ctx->current_hash_req;
else else
sep->current_cypher_req = sctx->current_cypher_req; sep->current_cypher_req = ta_ctx->current_cypher_req;
sep->current_request = sctx->current_request; sep->current_request = ta_ctx->current_request;
sep->current_hash_stage = sctx->current_hash_stage; sep->current_hash_stage = ta_ctx->current_hash_stage;
sep->sctx = sctx; sep->ta_ctx = ta_ctx;
sep->in_kernel = 1; sep->in_kernel = 1;
sctx->i_own_sep = 1; ta_ctx->i_own_sep = 1;
/* need to set bit first to avoid race condition with interrupt */
set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
result = sep_send_command_handler(sep); result = sep_send_command_handler(sep);
dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n", dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
current->pid); current->pid);
if (!result) { if (!result)
set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&sctx->call_status.status);
dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n", dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
current->pid); current->pid);
else {
dev_dbg(&sep->pdev->dev, "[PID%d]: cant send command\n",
current->pid);
clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&ta_ctx->call_status.status);
} }
return result; return result;
} }
/* This needs to be run as a work queue as it can be put asleep */ /**
static void sep_crypto_block(void *data) * This function sets things up for a crypto data block process
* This does all preparation, but does not try to grab the
* sep
* @req: pointer to struct ablkcipher_request
* returns: 0 if all went well, non zero if error
*/
static int sep_crypto_block_data(struct ablkcipher_request *req)
{ {
int int_error; int int_error;
u32 msg_offset; u32 msg_offset;
static u32 msg[10]; static u32 msg[10];
@@ -1085,318 +1172,440 @@ static void sep_crypto_block(void *data)
ssize_t copy_result; ssize_t copy_result;
int result; int result;
u32 max_length;
struct scatterlist *new_sg; struct scatterlist *new_sg;
struct ablkcipher_request *req; struct this_task_ctx *ta_ctx;
struct sep_block_ctx *bctx;
struct crypto_ablkcipher *tfm; struct crypto_ablkcipher *tfm;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
req = (struct ablkcipher_request *)data; struct sep_des_internal_context *des_internal;
bctx = ablkcipher_request_ctx(req); struct sep_aes_internal_context *aes_internal;
ta_ctx = ablkcipher_request_ctx(req);
tfm = crypto_ablkcipher_reqtfm(req); tfm = crypto_ablkcipher_reqtfm(req);
sctx = crypto_ablkcipher_ctx(tfm); sctx = crypto_ablkcipher_ctx(tfm);
/* start the walk on scatterlists */ /* start the walk on scatterlists */
ablkcipher_walk_init(&bctx->walk, req->src, req->dst, req->nbytes); ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
dev_dbg(&sctx->sep_used->pdev->dev, "sep crypto block data size of %x\n", dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
req->nbytes); req->nbytes);
int_error = ablkcipher_walk_phys(req, &bctx->walk); int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
if (int_error) { if (int_error) {
dev_warn(&sctx->sep_used->pdev->dev, "walk phys error %x\n", dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
int_error); int_error);
sep_crypto_release(sctx, -ENOMEM); return -ENOMEM;
return;
}
/* check iv */
if (bctx->des_opmode == SEP_DES_CBC) {
if (!bctx->walk.iv) {
dev_warn(&sctx->sep_used->pdev->dev, "no iv found\n");
sep_crypto_release(sctx, -EINVAL);
return;
}
memcpy(bctx->iv, bctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
sep_dump(sctx->sep_used, "iv", bctx->iv, SEP_DES_IV_SIZE_BYTES);
}
if (bctx->aes_opmode == SEP_AES_CBC) {
if (!bctx->walk.iv) {
dev_warn(&sctx->sep_used->pdev->dev, "no iv found\n");
sep_crypto_release(sctx, -EINVAL);
return;
}
memcpy(bctx->iv, bctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
sep_dump(sctx->sep_used, "iv", bctx->iv, SEP_AES_IV_SIZE_BYTES);
} }
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"crypto block: src is %lx dst is %lx\n", "crypto block: src is %lx dst is %lx\n",
(unsigned long)req->src, (unsigned long)req->dst); (unsigned long)req->src, (unsigned long)req->dst);
/* Make sure all pages are even block */ /* Make sure all pages are even block */
int_error = sep_oddball_pages(sctx->sep_used, req->src, int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
req->nbytes, bctx->walk.blocksize, &new_sg, 1); req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
if (int_error < 0) { if (int_error < 0) {
dev_warn(&sctx->sep_used->pdev->dev, "oddball page eerror\n"); dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page eerror\n");
sep_crypto_release(sctx, -ENOMEM); return -ENOMEM;
return;
} else if (int_error == 1) { } else if (int_error == 1) {
sctx->src_sg = new_sg; ta_ctx->src_sg = new_sg;
sctx->src_sg_hold = new_sg; ta_ctx->src_sg_hold = new_sg;
} else { } else {
sctx->src_sg = req->src; ta_ctx->src_sg = req->src;
sctx->src_sg_hold = NULL; ta_ctx->src_sg_hold = NULL;
} }
int_error = sep_oddball_pages(sctx->sep_used, req->dst, int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
req->nbytes, bctx->walk.blocksize, &new_sg, 0); req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
if (int_error < 0) { if (int_error < 0) {
dev_warn(&sctx->sep_used->pdev->dev, "walk phys error %x\n", dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
int_error); int_error);
sep_crypto_release(sctx, -ENOMEM); return -ENOMEM;
return;
} else if (int_error == 1) { } else if (int_error == 1) {
sctx->dst_sg = new_sg; ta_ctx->dst_sg = new_sg;
sctx->dst_sg_hold = new_sg; ta_ctx->dst_sg_hold = new_sg;
} else { } else {
sctx->dst_sg = req->dst; ta_ctx->dst_sg = req->dst;
sctx->dst_sg_hold = NULL; ta_ctx->dst_sg_hold = NULL;
} }
/* Do we need to perform init; ie; send key to sep? */ /* set nbytes for queue status */
if (sctx->key_sent == 0) { ta_ctx->nbytes = req->nbytes;
dev_dbg(&sctx->sep_used->pdev->dev, "sending key\n"); /* Key already done; this is for data */
dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
/* put together message to SEP */ sep_dump_sg(ta_ctx->sep_used,
/* Start with op code */ "block sg in", ta_ctx->src_sg);
sep_make_header(sctx, &msg_offset, bctx->init_opcode);
/* now deal with IV */ /* check for valid data and proper spacing */
if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { src_ptr = sg_virt(ta_ctx->src_sg);
if (bctx->des_opmode == SEP_DES_CBC) { dst_ptr = sg_virt(ta_ctx->dst_sg);
sep_write_msg(sctx, bctx->iv,
SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
&msg_offset, 1);
sep_dump(sctx->sep_used, "initial IV",
bctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
} else {
/* Skip if ECB */
msg_offset += 4 * sizeof(u32);
}
} else {
max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
sizeof(u32)) * sizeof(u32);
if (bctx->aes_opmode == SEP_AES_CBC) {
sep_write_msg(sctx, bctx->iv,
SEP_AES_IV_SIZE_BYTES, max_length,
&msg_offset, 1);
sep_dump(sctx->sep_used, "initial IV",
bctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
} else {
/* Skip if ECB */
msg_offset += max_length;
}
}
/* load the key */ if (!src_ptr || !dst_ptr ||
if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { (ta_ctx->current_cypher_req->nbytes %
sep_write_msg(sctx, (void *)&sctx->key.des.key1, crypto_ablkcipher_blocksize(tfm))) {
sizeof(u32) * 8, sizeof(u32) * 8,
&msg_offset, 1); dev_warn(&ta_ctx->sep_used->pdev->dev,
"cipher block size odd\n");
dev_warn(&ta_ctx->sep_used->pdev->dev,
"cipher block size is %x\n",
crypto_ablkcipher_blocksize(tfm));
dev_warn(&ta_ctx->sep_used->pdev->dev,
"cipher data size is %x\n",
ta_ctx->current_cypher_req->nbytes);
return -EINVAL;
}
msg[0] = (u32)sctx->des_nbr_keys; if (partial_overlap(src_ptr, dst_ptr,
msg[1] = (u32)bctx->des_encmode; ta_ctx->current_cypher_req->nbytes)) {
msg[2] = (u32)bctx->des_opmode; dev_warn(&ta_ctx->sep_used->pdev->dev,
"block partial overlap\n");
return -EINVAL;
}
sep_write_msg(sctx, (void *)msg, /* Put together the message */
sizeof(u32) * 3, sizeof(u32) * 3, sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
&msg_offset, 0);
} else { /* If des, and size is 1 block, put directly in msg */
sep_write_msg(sctx, (void *)&sctx->key.aes, if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
sctx->keylen, (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
SEP_AES_MAX_KEY_SIZE_BYTES,
&msg_offset, 1); dev_dbg(&ta_ctx->sep_used->pdev->dev,
"writing out one block des\n");
copy_result = sg_copy_to_buffer(
ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
small_buf, crypto_ablkcipher_blocksize(tfm));
msg[0] = (u32)sctx->aes_key_size; if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
msg[1] = (u32)bctx->aes_encmode; dev_warn(&ta_ctx->sep_used->pdev->dev,
msg[2] = (u32)bctx->aes_opmode; "des block copy faild\n");
msg[3] = (u32)0; /* Secret key is not used */ return -ENOMEM;
sep_write_msg(sctx, (void *)msg,
sizeof(u32) * 4, sizeof(u32) * 4,
&msg_offset, 0);
} }
/* Put data into message */
sep_write_msg(ta_ctx, small_buf,
crypto_ablkcipher_blocksize(tfm),
crypto_ablkcipher_blocksize(tfm) * 2,
&msg_offset, 1);
/* Put size into message */
sep_write_msg(ta_ctx, &req->nbytes,
sizeof(u32), sizeof(u32), &msg_offset, 0);
} else { } else {
/* Otherwise, fill out dma tables */
ta_ctx->dcb_input_data.app_in_address = src_ptr;
ta_ctx->dcb_input_data.data_in_size = req->nbytes;
ta_ctx->dcb_input_data.app_out_address = dst_ptr;
ta_ctx->dcb_input_data.block_size =
crypto_ablkcipher_blocksize(tfm);
ta_ctx->dcb_input_data.tail_block_size = 0;
ta_ctx->dcb_input_data.is_applet = 0;
ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
result = sep_create_dcb_dmatables_context_kernel(
ta_ctx->sep_used,
&ta_ctx->dcb_region,
&ta_ctx->dmatables_region,
&ta_ctx->dma_ctx,
&ta_ctx->dcb_input_data,
1);
if (result) {
dev_warn(&ta_ctx->sep_used->pdev->dev,
"crypto dma table create failed\n");
return -EINVAL;
}
/* Portion of msg is nulled (no data) */
msg[0] = (u32)0;
msg[1] = (u32)0;
msg[2] = (u32)0;
msg[3] = (u32)0;
msg[4] = (u32)0;
sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
sizeof(u32) * 5, &msg_offset, 0);
}
/* set nbytes for queue status */ /**
sctx->nbytes = req->nbytes; * Before we write the message, we need to overwrite the
* vendor's IV with the one from our own ablkcipher walk
* iv because this is needed for dm-crypt
*/
sep_dump_ivs(req, "sending data block to sep\n");
if ((ta_ctx->current_request == DES_CBC) &&
(ta_ctx->des_opmode == SEP_DES_CBC)) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"overwrite vendor iv on DES\n");
des_internal = (struct sep_des_internal_context *)
sctx->des_private_ctx.ctx_buf;
memcpy((void *)des_internal->iv_context,
ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
} else if ((ta_ctx->current_request == AES_CBC) &&
(ta_ctx->aes_opmode == SEP_AES_CBC)) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"overwrite vendor iv on AES\n");
aes_internal = (struct sep_aes_internal_context *)
sctx->aes_private_ctx.cbuff;
memcpy((void *)aes_internal->aes_ctx_iv,
ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
}
/* Write context into message */
if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
sep_write_context(ta_ctx, &msg_offset,
&sctx->des_private_ctx,
sizeof(struct sep_des_private_context));
sep_dump(ta_ctx->sep_used, "ctx to block des",
&sctx->des_private_ctx, 40);
} else {
sep_write_context(ta_ctx, &msg_offset,
&sctx->aes_private_ctx,
sizeof(struct sep_aes_private_context));
sep_dump(ta_ctx->sep_used, "ctx to block aes",
&sctx->aes_private_ctx, 20);
}
/* Key already done; this is for data */ /* conclude message */
dev_dbg(&sctx->sep_used->pdev->dev, "sending data\n"); sep_end_msg(ta_ctx, msg_offset);
sep_dump_sg(sctx->sep_used, /* Parent (caller) is now ready to tell the sep to do ahead */
"block sg in", sctx->src_sg); return 0;
}
/* check for valid data and proper spacing */
src_ptr = sg_virt(sctx->src_sg);
dst_ptr = sg_virt(sctx->dst_sg);
if (!src_ptr || !dst_ptr || /**
(sctx->current_cypher_req->nbytes % * This function sets things up for a crypto key submit process
crypto_ablkcipher_blocksize(tfm))) { * This does all preparation, but does not try to grab the
* sep
* @req: pointer to struct ablkcipher_request
* returns: 0 if all went well, non zero if error
*/
static int sep_crypto_send_key(struct ablkcipher_request *req)
{
dev_warn(&sctx->sep_used->pdev->dev, int int_error;
"cipher block size odd\n"); u32 msg_offset;
dev_warn(&sctx->sep_used->pdev->dev, static u32 msg[10];
"cipher block size is %x\n",
crypto_ablkcipher_blocksize(tfm));
dev_warn(&sctx->sep_used->pdev->dev,
"cipher data size is %x\n",
sctx->current_cypher_req->nbytes);
sep_crypto_release(sctx, -EINVAL);
return;
}
if (partial_overlap(src_ptr, dst_ptr, u32 max_length;
sctx->current_cypher_req->nbytes)) { struct this_task_ctx *ta_ctx;
dev_warn(&sctx->sep_used->pdev->dev, struct crypto_ablkcipher *tfm;
"block partial overlap\n"); struct sep_system_ctx *sctx;
sep_crypto_release(sctx, -EINVAL);
return;
}
/* Put together the message */ ta_ctx = ablkcipher_request_ctx(req);
sep_make_header(sctx, &msg_offset, bctx->block_opcode); tfm = crypto_ablkcipher_reqtfm(req);
sctx = crypto_ablkcipher_ctx(tfm);
/* If des, and size is 1 block, put directly in msg */ dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
if ((bctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
(req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
dev_dbg(&sctx->sep_used->pdev->dev, /* start the walk on scatterlists */
"writing out one block des\n"); ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep crypto block data size of %x\n", req->nbytes);
copy_result = sg_copy_to_buffer( int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
sctx->src_sg, sep_sg_nents(sctx->src_sg), if (int_error) {
small_buf, crypto_ablkcipher_blocksize(tfm)); dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
int_error);
return -ENOMEM;
}
if (copy_result != crypto_ablkcipher_blocksize(tfm)) { /* check iv */
dev_warn(&sctx->sep_used->pdev->dev, if ((ta_ctx->current_request == DES_CBC) &&
"des block copy faild\n"); (ta_ctx->des_opmode == SEP_DES_CBC)) {
sep_crypto_release(sctx, -ENOMEM); if (!ta_ctx->walk.iv) {
return; dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
} return -EINVAL;
}
/* Put data into message */ memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
sep_write_msg(sctx, small_buf, sep_dump(ta_ctx->sep_used, "iv",
crypto_ablkcipher_blocksize(tfm), ta_ctx->iv, SEP_DES_IV_SIZE_BYTES);
crypto_ablkcipher_blocksize(tfm) * 2, }
&msg_offset, 1);
/* Put size into message */ if ((ta_ctx->current_request == AES_CBC) &&
sep_write_msg(sctx, &req->nbytes, (ta_ctx->aes_opmode == SEP_AES_CBC)) {
sizeof(u32), sizeof(u32), &msg_offset, 0); if (!ta_ctx->walk.iv) {
dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
return -EINVAL;
}
memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
sep_dump(ta_ctx->sep_used, "iv",
ta_ctx->iv, SEP_AES_IV_SIZE_BYTES);
}
/* put together message to SEP */
/* Start with op code */
sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
/* now deal with IV */
if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
if (ta_ctx->des_opmode == SEP_DES_CBC) {
sep_write_msg(ta_ctx, ta_ctx->iv,
SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
&msg_offset, 1);
sep_dump(ta_ctx->sep_used, "initial IV",
ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
} else {
/* Skip if ECB */
msg_offset += 4 * sizeof(u32);
}
} else {
max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
sizeof(u32)) * sizeof(u32);
if (ta_ctx->aes_opmode == SEP_AES_CBC) {
sep_write_msg(ta_ctx, ta_ctx->iv,
SEP_AES_IV_SIZE_BYTES, max_length,
&msg_offset, 1);
sep_dump(ta_ctx->sep_used, "initial IV",
ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
} else { } else {
/* Otherwise, fill out dma tables */ /* Skip if ECB */
sctx->dcb_input_data.app_in_address = src_ptr; msg_offset += max_length;
sctx->dcb_input_data.data_in_size = req->nbytes;
sctx->dcb_input_data.app_out_address = dst_ptr;
sctx->dcb_input_data.block_size =
crypto_ablkcipher_blocksize(tfm);
sctx->dcb_input_data.tail_block_size = 0;
sctx->dcb_input_data.is_applet = 0;
sctx->dcb_input_data.src_sg = sctx->src_sg;
sctx->dcb_input_data.dst_sg = sctx->dst_sg;
result = sep_create_dcb_dmatables_context_kernel(
sctx->sep_used,
&sctx->dcb_region,
&sctx->dmatables_region,
&sctx->dma_ctx,
&sctx->dcb_input_data,
1);
if (result) {
dev_warn(&sctx->sep_used->pdev->dev,
"crypto dma table create failed\n");
sep_crypto_release(sctx, -EINVAL);
return;
} }
}
/* load the key */
if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
sizeof(u32) * 8, sizeof(u32) * 8,
&msg_offset, 1);
msg[0] = (u32)sctx->des_nbr_keys;
msg[1] = (u32)ta_ctx->des_encmode;
msg[2] = (u32)ta_ctx->des_opmode;
/* Portion of msg is nulled (no data) */ sep_write_msg(ta_ctx, (void *)msg,
msg[0] = (u32)0; sizeof(u32) * 3, sizeof(u32) * 3,
msg[1] = (u32)0; &msg_offset, 0);
msg[2] = (u32)0; } else {
msg[3] = (u32)0; sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
msg[4] = (u32)0; sctx->keylen,
sep_write_msg(sctx, (void *)msg, SEP_AES_MAX_KEY_SIZE_BYTES,
sizeof(u32) * 5, &msg_offset, 1);
sizeof(u32) * 5,
&msg_offset, 0); msg[0] = (u32)sctx->aes_key_size;
msg[1] = (u32)ta_ctx->aes_encmode;
msg[2] = (u32)ta_ctx->aes_opmode;
msg[3] = (u32)0; /* Secret key is not used */
sep_write_msg(ta_ctx, (void *)msg,
sizeof(u32) * 4, sizeof(u32) * 4,
&msg_offset, 0);
}
/* conclude message */
sep_end_msg(ta_ctx, msg_offset);
/* Parent (caller) is now ready to tell the sep to do ahead */
return 0;
}
/* This needs to be run as a work queue as it can be put asleep */
static void sep_crypto_block(void *data)
{
unsigned long end_time;
int result;
struct ablkcipher_request *req;
struct this_task_ctx *ta_ctx;
struct crypto_ablkcipher *tfm;
struct sep_system_ctx *sctx;
int are_we_done_yet;
req = (struct ablkcipher_request *)data;
ta_ctx = ablkcipher_request_ctx(req);
tfm = crypto_ablkcipher_reqtfm(req);
sctx = crypto_ablkcipher_ctx(tfm);
ta_ctx->are_we_done_yet = &are_we_done_yet;
pr_debug("sep_crypto_block\n");
pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
tfm, sctx, ta_ctx);
pr_debug("key_sent is %d\n", sctx->key_sent);
/* do we need to send the key */
if (sctx->key_sent == 0) {
are_we_done_yet = 0;
result = sep_crypto_send_key(req); /* prep to send key */
if (result != 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"could not prep key %x\n", result);
sep_crypto_release(sctx, ta_ctx, result);
return;
} }
/* Write context into message */ result = sep_crypto_take_sep(ta_ctx);
if (bctx->block_opcode == SEP_DES_BLOCK_OPCODE) { if (result) {
sep_write_context(sctx, &msg_offset, dev_warn(&ta_ctx->sep_used->pdev->dev,
&bctx->des_private_ctx, "sep_crypto_take_sep for key send failed\n");
sizeof(struct sep_des_private_context)); sep_crypto_release(sctx, ta_ctx, result);
sep_dump(sctx->sep_used, "ctx to block des", return;
&bctx->des_private_ctx, 40); }
} else {
sep_write_context(sctx, &msg_offset, /* now we sit and wait up to a fixed time for completion */
&bctx->aes_private_ctx, end_time = jiffies + (WAIT_TIME * HZ);
sizeof(struct sep_aes_private_context)); while ((time_before(jiffies, end_time)) &&
sep_dump(sctx->sep_used, "ctx to block aes", (are_we_done_yet == 0))
&bctx->aes_private_ctx, 20); schedule();
/* Done waiting; still not done yet? */
if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"Send key job never got done\n");
sep_crypto_release(sctx, ta_ctx, -EINVAL);
return;
} }
/* Set the key sent variable so this can be skipped later */
sctx->key_sent = 1;
} }
/* conclude message and then tell sep to do its thing */ /* Key sent (or maybe not if we did not have to), now send block */
sctx->done_with_transaction = 0; are_we_done_yet = 0;
sep_end_msg(sctx, msg_offset); result = sep_crypto_block_data(req);
result = sep_crypto_take_sep(sctx);
if (result) { if (result != 0) {
dev_warn(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep_crypto_take_sep failed\n"); "could prep not send block %x\n", result);
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, result);
return; return;
} }
/** result = sep_crypto_take_sep(ta_ctx);
* Sep is now working. Lets wait up to 5 seconds if (result) {
* for completion. If it does not complete, we will do dev_warn(&ta_ctx->sep_used->pdev->dev,
* a crypto release with -EINVAL to release the "sep_crypto_take_sep for block send failed\n");
* kernel crypto infrastructure and let the system sep_crypto_release(sctx, ta_ctx, result);
* continue to boot up return;
* We have to wait this long because some crypto }
* operations can take a while
*/
dev_dbg(&sctx->sep_used->pdev->dev,
"waiting for done with transaction\n");
sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ); /* now we sit and wait up to a fixed time for completion */
while ((time_before(jiffies, sctx->end_time)) && end_time = jiffies + (WAIT_TIME * HZ);
(!sctx->done_with_transaction)) while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
schedule(); schedule();
dev_dbg(&sctx->sep_used->pdev->dev, /* Done waiting; still not done yet? */
"done waiting for done with transaction\n"); if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* are we done? */ "Send block job never got done\n");
if (!sctx->done_with_transaction) { sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* Nope, lets release and tell crypto no */ return;
dev_warn(&sctx->sep_used->pdev->dev,
"[PID%d] sep_crypto_block never finished\n",
current->pid);
sep_crypto_release(sctx, -EINVAL);
} }
/* That's it; entire thing done, get out of queue */
pr_debug("crypto_block leaving\n");
pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
} }
/** /**
@@ -1405,7 +1614,6 @@ static void sep_crypto_block(void *data)
static u32 crypto_post_op(struct sep_device *sep) static u32 crypto_post_op(struct sep_device *sep)
{ {
/* HERE */ /* HERE */
int int_error;
u32 u32_error; u32 u32_error;
u32 msg_offset; u32 msg_offset;
@@ -1413,169 +1621,185 @@ static u32 crypto_post_op(struct sep_device *sep)
static char small_buf[100]; static char small_buf[100];
struct ablkcipher_request *req; struct ablkcipher_request *req;
struct sep_block_ctx *bctx; struct this_task_ctx *ta_ctx;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
struct crypto_ablkcipher *tfm; struct crypto_ablkcipher *tfm;
struct sep_des_internal_context *des_internal;
struct sep_aes_internal_context *aes_internal;
if (!sep->current_cypher_req) if (!sep->current_cypher_req)
return -EINVAL; return -EINVAL;
/* hold req since we need to submit work after clearing sep */ /* hold req since we need to submit work after clearing sep */
req = sep->current_cypher_req; req = sep->current_cypher_req;
bctx = ablkcipher_request_ctx(sep->current_cypher_req); ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req); tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
sctx = crypto_ablkcipher_ctx(tfm); sctx = crypto_ablkcipher_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, "crypto post_op\n"); pr_debug("crypto_post op\n");
dev_dbg(&sctx->sep_used->pdev->dev, "crypto post_op message dump\n"); pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
crypto_sep_dump_message(sctx); sctx->key_sent, tfm, sctx, ta_ctx);
sctx->done_with_transaction = 1; dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
crypto_sep_dump_message(ta_ctx->sep_used, ta_ctx->msg);
/* first bring msg from shared area to local area */ /* first bring msg from shared area to local area */
memcpy(sctx->msg, sep->shared_addr, memcpy(ta_ctx->msg, sep->shared_addr,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
/* Is this the result of performing init (key to SEP */ /* Is this the result of performing init (key to SEP */
if (sctx->key_sent == 0) { if (sctx->key_sent == 0) {
/* Did SEP do it okay */ /* Did SEP do it okay */
u32_error = sep_verify_op(sctx, bctx->init_opcode, u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"aes init error %x\n", u32_error); "aes init error %x\n", u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error; return u32_error;
} }
/* Read Context */ /* Read Context */
if (bctx->init_opcode == SEP_DES_INIT_OPCODE) { if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&bctx->des_private_ctx, &sctx->des_private_ctx,
sizeof(struct sep_des_private_context)); sizeof(struct sep_des_private_context));
sep_dump(sctx->sep_used, "ctx init des", sep_dump(ta_ctx->sep_used, "ctx init des",
&bctx->des_private_ctx, 40); &sctx->des_private_ctx, 40);
} else { } else {
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&bctx->aes_private_ctx, &sctx->aes_private_ctx,
sizeof(struct sep_des_private_context)); sizeof(struct sep_aes_private_context));
sep_dump(sctx->sep_used, "ctx init aes",
&bctx->aes_private_ctx, 20);
}
/* We are done with init. Now send out the data */
/* first release the sep */
sctx->key_sent = 1;
sep_crypto_release(sctx, -EINPROGRESS);
spin_lock_irq(&queue_lock); sep_dump(ta_ctx->sep_used, "ctx init aes",
int_error = crypto_enqueue_request(&sep_queue, &req->base); &sctx->aes_private_ctx, 20);
spin_unlock_irq(&queue_lock);
if ((int_error != 0) && (int_error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"spe cypher post op cant queue\n");
sep_crypto_release(sctx, int_error);
return int_error;
} }
/* schedule the data send */ sep_dump_ivs(req, "after sending key to sep\n");
int_error = sep_submit_work(sep->workqueue, sep_dequeuer,
(void *)&sep_queue);
if (int_error) { /* key sent went okay; release sep, and set are_we_done_yet */
dev_warn(&sep->pdev->dev, sctx->key_sent = 1;
"cant submit work sep_crypto_block\n"); sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
sep_crypto_release(sctx, -EINVAL);
return -EINVAL;
}
} else { } else {
/** /**
* This is the result of a block request * This is the result of a block request
*/ */
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"crypto_post_op block response\n"); "crypto_post_op block response\n");
u32_error = sep_verify_op(sctx, bctx->block_opcode, u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sep block error %x\n", u32_error); "sep block error %x\n", u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return -EINVAL; return -EINVAL;
} }
if (bctx->block_opcode == SEP_DES_BLOCK_OPCODE) { if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"post op for DES\n"); "post op for DES\n");
/* special case for 1 block des */ /* special case for 1 block des */
if (sep->current_cypher_req->nbytes == if (sep->current_cypher_req->nbytes ==
crypto_ablkcipher_blocksize(tfm)) { crypto_ablkcipher_blocksize(tfm)) {
sep_read_msg(sctx, small_buf, sep_read_msg(ta_ctx, small_buf,
crypto_ablkcipher_blocksize(tfm), crypto_ablkcipher_blocksize(tfm),
crypto_ablkcipher_blocksize(tfm) * 2, crypto_ablkcipher_blocksize(tfm) * 2,
&msg_offset, 1); &msg_offset, 1);
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"reading in block des\n"); "reading in block des\n");
copy_result = sg_copy_from_buffer( copy_result = sg_copy_from_buffer(
sctx->dst_sg, ta_ctx->dst_sg,
sep_sg_nents(sctx->dst_sg), sep_sg_nents(ta_ctx->dst_sg),
small_buf, small_buf,
crypto_ablkcipher_blocksize(tfm)); crypto_ablkcipher_blocksize(tfm));
if (copy_result != if (copy_result !=
crypto_ablkcipher_blocksize(tfm)) { crypto_ablkcipher_blocksize(tfm)) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"des block copy faild\n"); "des block copy faild\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx,
-ENOMEM);
return -ENOMEM; return -ENOMEM;
} }
} }
/* Read Context */ /* Read Context */
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&bctx->des_private_ctx, &sctx->des_private_ctx,
sizeof(struct sep_des_private_context)); sizeof(struct sep_des_private_context));
} else { } else {
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"post op for AES\n"); "post op for AES\n");
/* Skip the MAC Output */ /* Skip the MAC Output */
msg_offset += (sizeof(u32) * 4); msg_offset += (sizeof(u32) * 4);
/* Read Context */ /* Read Context */
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&bctx->aes_private_ctx, &sctx->aes_private_ctx,
sizeof(struct sep_aes_private_context)); sizeof(struct sep_aes_private_context));
} }
sep_dump_sg(sctx->sep_used, sep_dump_sg(ta_ctx->sep_used,
"block sg out", sctx->dst_sg); "block sg out", ta_ctx->dst_sg);
/* Copy to correct sg if this block had oddball pages */ /* Copy to correct sg if this block had oddball pages */
if (sctx->dst_sg_hold) if (ta_ctx->dst_sg_hold)
sep_copy_sg(sctx->sep_used, sep_copy_sg(ta_ctx->sep_used,
sctx->dst_sg, ta_ctx->dst_sg,
sctx->current_cypher_req->dst, ta_ctx->current_cypher_req->dst,
sctx->current_cypher_req->nbytes); ta_ctx->current_cypher_req->nbytes);
/**
* Copy the iv's back to the walk.iv
* This is required for dm_crypt
*/
sep_dump_ivs(req, "got data block from sep\n");
if ((ta_ctx->current_request == DES_CBC) &&
(ta_ctx->des_opmode == SEP_DES_CBC)) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"returning result iv to walk on DES\n");
des_internal = (struct sep_des_internal_context *)
sctx->des_private_ctx.ctx_buf;
memcpy(ta_ctx->walk.iv,
(void *)des_internal->iv_context,
crypto_ablkcipher_ivsize(tfm));
} else if ((ta_ctx->current_request == AES_CBC) &&
(ta_ctx->aes_opmode == SEP_AES_CBC)) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"returning result iv to walk on AES\n");
aes_internal = (struct sep_aes_internal_context *)
sctx->aes_private_ctx.cbuff;
memcpy(ta_ctx->walk.iv,
(void *)aes_internal->aes_ctx_iv,
crypto_ablkcipher_ivsize(tfm));
}
/* finished, release everything */ /* finished, release everything */
sep_crypto_release(sctx, 0); sep_crypto_release(sctx, ta_ctx, 0);
} }
pr_debug("crypto_post_op done\n");
pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
sctx->key_sent, tfm, sctx, ta_ctx);
return 0; return 0;
} }
@@ -1584,35 +1808,33 @@ static u32 hash_init_post_op(struct sep_device *sep)
u32 u32_error; u32 u32_error;
u32 msg_offset; u32 msg_offset;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
struct sep_hash_ctx *ctx = ahash_request_ctx(sep->current_hash_req); struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"hash init post op\n"); "hash init post op\n");
sctx->done_with_transaction = 1;
/* first bring msg from shared area to local area */ /* first bring msg from shared area to local area */
memcpy(sctx->msg, sep->shared_addr, memcpy(ta_ctx->msg, sep->shared_addr,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
u32_error = sep_verify_op(sctx, SEP_HASH_INIT_OPCODE, u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, "hash init error %x\n", dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
u32_error); u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error; return u32_error;
} }
/* Read Context */ /* Read Context */
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&ctx->hash_private_ctx, &sctx->hash_private_ctx,
sizeof(struct sep_hash_private_context)); sizeof(struct sep_hash_private_context));
/* Signal to crypto infrastructure and clear out */ /* Signal to crypto infrastructure and clear out */
dev_dbg(&sctx->sep_used->pdev->dev, "hash init post op done\n"); dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
sep_crypto_release(sctx, 0); sep_crypto_release(sctx, ta_ctx, 0);
return 0; return 0;
} }
@@ -1621,33 +1843,69 @@ static u32 hash_update_post_op(struct sep_device *sep)
u32 u32_error; u32 u32_error;
u32 msg_offset; u32 msg_offset;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
struct sep_hash_ctx *ctx = ahash_request_ctx(sep->current_hash_req); struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"hash update post op\n"); "hash update post op\n");
sctx->done_with_transaction = 1;
/* first bring msg from shared area to local area */ /* first bring msg from shared area to local area */
memcpy(sctx->msg, sep->shared_addr, memcpy(ta_ctx->msg, sep->shared_addr,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
u32_error = sep_verify_op(sctx, SEP_HASH_UPDATE_OPCODE, u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, "hash init error %x\n", dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
u32_error); u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error; return u32_error;
} }
/* Read Context */ /* Read Context */
sep_read_context(sctx, &msg_offset, sep_read_context(ta_ctx, &msg_offset,
&ctx->hash_private_ctx, &sctx->hash_private_ctx,
sizeof(struct sep_hash_private_context)); sizeof(struct sep_hash_private_context));
sep_crypto_release(sctx, 0); /**
* Following is only for finup; if we just completed the
* data portion of finup, we now need to kick off the
* finish portion of finup.
*/
if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
/* first reset stage to HASH_FINUP_FINISH */
ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
/* now enqueue the finish operation */
spin_lock_irq(&queue_lock);
u32_error = crypto_enqueue_request(&sep_queue,
&ta_ctx->sep_used->current_hash_req->base);
spin_unlock_irq(&queue_lock);
if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
dev_warn(&ta_ctx->sep_used->pdev->dev,
"spe cypher post op cant queue\n");
sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error;
}
/* schedule the data send */
u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (u32_error) {
dev_warn(&ta_ctx->sep_used->pdev->dev,
"cant submit work sep_crypto_block\n");
sep_crypto_release(sctx, ta_ctx, -EINVAL);
return -EINVAL;
}
}
/* Signal to crypto infrastructure and clear out */
dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
sep_crypto_release(sctx, ta_ctx, 0);
return 0; return 0;
} }
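The finup split described in the comment above works by flipping the stage and re-queueing the same request, so the ordinary dequeuer path then issues the finish half. A minimal, self-contained sketch of that two-stage dispatch, using hypothetical names rather than the driver's real structures:

/* Hedged illustration of the finup two-stage dispatch: the post-op handler
 * for the data half flips the stage and re-queues the request, and the
 * normal dequeue path then runs the finish half. Names are illustrative. */
enum finup_stage { FINUP_DATA, FINUP_FINISH };

struct finup_job {
	enum finup_stage stage;
	void (*enqueue)(struct finup_job *job);	/* put back on the crypto queue */
	void (*finish)(struct finup_job *job);	/* issue the finish operation */
};

static void finup_data_post_op(struct finup_job *job)
{
	if (job->stage != FINUP_DATA)
		return;
	job->stage = FINUP_FINISH;	/* flip the stage before re-queueing */
	job->enqueue(job);		/* the dequeuer will call job->finish() */
}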
@@ -1658,45 +1916,44 @@ static u32 hash_final_post_op(struct sep_device *sep)
u32 msg_offset; u32 msg_offset;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"hash final post op\n"); "hash final post op\n");
sctx->done_with_transaction = 1;
/* first bring msg from shared area to local area */ /* first bring msg from shared area to local area */
memcpy(sctx->msg, sep->shared_addr, memcpy(ta_ctx->msg, sep->shared_addr,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
u32_error = sep_verify_op(sctx, SEP_HASH_FINISH_OPCODE, u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, "hash finish error %x\n", dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
u32_error); u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error; return u32_error;
} }
/* Grab the result */ /* Grab the result */
if (sctx->current_hash_req->result == NULL) { if (ta_ctx->current_hash_req->result == NULL) {
/* Oops, null buffer; error out here */ /* Oops, null buffer; error out here */
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"hash finish null buffer\n"); "hash finish null buffer\n");
sep_crypto_release(sctx, (u32)-ENOMEM); sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
return -ENOMEM; return -ENOMEM;
} }
max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) / max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
sizeof(u32)) * sizeof(u32); sizeof(u32)) * sizeof(u32);
sep_read_msg(sctx, sep_read_msg(ta_ctx,
sctx->current_hash_req->result, ta_ctx->current_hash_req->result,
crypto_ahash_digestsize(tfm), max_length, crypto_ahash_digestsize(tfm), max_length,
&msg_offset, 0); &msg_offset, 0);
/* Signal to crypto infrastructure and clear out */ /* Signal to crypto infrastructure and clear out */
dev_dbg(&sctx->sep_used->pdev->dev, "hash finish post op done\n"); dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
sep_crypto_release(sctx, 0); sep_crypto_release(sctx, ta_ctx, 0);
return 0; return 0;
} }
@@ -1707,48 +1964,47 @@ static u32 hash_digest_post_op(struct sep_device *sep)
u32 msg_offset; u32 msg_offset;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
dev_dbg(&ta_ctx->sep_used->pdev->dev,
"hash digest post op\n"); "hash digest post op\n");
sctx->done_with_transaction = 1;
/* first bring msg from shared area to local area */ /* first bring msg from shared area to local area */
memcpy(sctx->msg, sep->shared_addr, memcpy(ta_ctx->msg, sep->shared_addr,
SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES); SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
u32_error = sep_verify_op(sctx, SEP_HASH_SINGLE_OPCODE, u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
&msg_offset); &msg_offset);
if (u32_error) { if (u32_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"hash digest finish error %x\n", u32_error); "hash digest finish error %x\n", u32_error);
sep_crypto_release(sctx, u32_error); sep_crypto_release(sctx, ta_ctx, u32_error);
return u32_error; return u32_error;
} }
/* Grab the result */ /* Grab the result */
if (sctx->current_hash_req->result == NULL) { if (ta_ctx->current_hash_req->result == NULL) {
/* Oops, null buffer; error out here */ /* Oops, null buffer; error out here */
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"hash digest finish null buffer\n"); "hash digest finish null buffer\n");
sep_crypto_release(sctx, (u32)-ENOMEM); sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
return -ENOMEM; return -ENOMEM;
} }
max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) / max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
sizeof(u32)) * sizeof(u32); sizeof(u32)) * sizeof(u32);
sep_read_msg(sctx, sep_read_msg(ta_ctx,
sctx->current_hash_req->result, ta_ctx->current_hash_req->result,
crypto_ahash_digestsize(tfm), max_length, crypto_ahash_digestsize(tfm), max_length,
&msg_offset, 0); &msg_offset, 0);
/* Signal to crypto infrastructure and clear out */ /* Signal to crypto infrastructure and clear out */
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"hash digest finish post op done\n"); "hash digest finish post op done\n");
sep_crypto_release(sctx, 0); sep_crypto_release(sctx, ta_ctx, 0);
return 0; return 0;
} }
@@ -1759,7 +2015,6 @@ static u32 hash_digest_post_op(struct sep_device *sep)
*/ */
static void sep_finish(unsigned long data) static void sep_finish(unsigned long data)
{ {
unsigned long flags;
struct sep_device *sep_dev; struct sep_device *sep_dev;
int res; int res;
@@ -1776,18 +2031,15 @@ static void sep_finish(unsigned long data)
return; return;
} }
spin_lock_irqsave(&sep_dev->busy_lock, flags);
if (sep_dev->in_kernel == (u32)0) { if (sep_dev->in_kernel == (u32)0) {
spin_unlock_irqrestore(&sep_dev->busy_lock, flags);
dev_warn(&sep_dev->pdev->dev, dev_warn(&sep_dev->pdev->dev,
"sep_finish; not in kernel operation\n"); "sep_finish; not in kernel operation\n");
return; return;
} }
spin_unlock_irqrestore(&sep_dev->busy_lock, flags);
/* Did we really do a sep command prior to this? */ /* Did we really do a sep command prior to this? */
if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
&sep_dev->sctx->call_status.status)) { &sep_dev->ta_ctx->call_status.status)) {
dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n", dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
current->pid); current->pid);
@@ -1856,8 +2108,10 @@ static void sep_finish(unsigned long data)
res = hash_init_post_op(sep_dev); res = hash_init_post_op(sep_dev);
break; break;
case HASH_UPDATE: case HASH_UPDATE:
case HASH_FINUP_DATA:
res = hash_update_post_op(sep_dev); res = hash_update_post_op(sep_dev);
break; break;
case HASH_FINUP_FINISH:
case HASH_FINISH: case HASH_FINISH:
res = hash_final_post_op(sep_dev); res = hash_final_post_op(sep_dev);
break; break;
@@ -1865,43 +2119,31 @@ static void sep_finish(unsigned long data)
res = hash_digest_post_op(sep_dev); res = hash_digest_post_op(sep_dev);
break; break;
default: default:
dev_warn(&sep_dev->pdev->dev, pr_debug("sep - invalid stage for hash finish\n");
"invalid stage for hash finish\n");
} }
break; break;
default: default:
dev_warn(&sep_dev->pdev->dev, pr_debug("sep - invalid request for finish\n");
"invalid request for finish\n");
} }
if (res) { if (res)
dev_warn(&sep_dev->pdev->dev, pr_debug("sep - finish returned error %x\n", res);
"finish returned error %x\n", res);
}
} }
static int sep_hash_cra_init(struct crypto_tfm *tfm) static int sep_hash_cra_init(struct crypto_tfm *tfm)
{ {
struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm);
const char *alg_name = crypto_tfm_alg_name(tfm); const char *alg_name = crypto_tfm_alg_name(tfm);
sctx->sep_used = sep_dev; pr_debug("sep_hash_cra_init name is %s\n", alg_name);
dev_dbg(&sctx->sep_used->pdev->dev,
"sep_hash_cra_init name is %s\n", alg_name);
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sep_hash_ctx)); sizeof(struct this_task_ctx));
return 0; return 0;
} }
static void sep_hash_cra_exit(struct crypto_tfm *tfm) static void sep_hash_cra_exit(struct crypto_tfm *tfm)
{ {
struct sep_system_ctx *sctx = crypto_tfm_ctx(tfm); pr_debug("sep_hash_cra_exit\n");
dev_dbg(&sctx->sep_used->pdev->dev,
"sep_hash_cra_exit\n");
sctx->sep_used = NULL;
} }
static void sep_hash_init(void *data) static void sep_hash_init(void *data)
@@ -1910,60 +2152,49 @@ static void sep_hash_init(void *data)
int result; int result;
struct ahash_request *req; struct ahash_request *req;
struct crypto_ahash *tfm; struct crypto_ahash *tfm;
struct sep_hash_ctx *ctx; struct this_task_ctx *ta_ctx;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
unsigned long end_time;
int are_we_done_yet;
req = (struct ahash_request *)data; req = (struct ahash_request *)data;
tfm = crypto_ahash_reqtfm(req); tfm = crypto_ahash_reqtfm(req);
ctx = ahash_request_ctx(req);
sctx = crypto_ahash_ctx(tfm); sctx = crypto_ahash_ctx(tfm);
ta_ctx = ahash_request_ctx(req);
ta_ctx->sep_used = sep_dev;
ta_ctx->are_we_done_yet = &are_we_done_yet;
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep_hash_init\n"); "sep_hash_init\n");
sctx->current_hash_stage = HASH_INIT; ta_ctx->current_hash_stage = HASH_INIT;
/* opcode and mode */ /* opcode and mode */
sep_make_header(sctx, &msg_offset, SEP_HASH_INIT_OPCODE); sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
sep_write_msg(sctx, &ctx->hash_opmode, sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
sizeof(u32), sizeof(u32), &msg_offset, 0); sizeof(u32), sizeof(u32), &msg_offset, 0);
sep_end_msg(sctx, msg_offset); sep_end_msg(ta_ctx, msg_offset);
sctx->done_with_transaction = 0; are_we_done_yet = 0;
result = sep_crypto_take_sep(ta_ctx);
result = sep_crypto_take_sep(sctx);
if (result) { if (result) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sep_hash_init take sep failed\n"); "sep_hash_init take sep failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
} }
/** /* now we sit and wait up to a fixed time for completion */
* Sep is now working. Lets wait up to 5 seconds end_time = jiffies + (WAIT_TIME * HZ);
* for completion. If it does not complete, we will do while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
* a crypto release with -EINVAL to release the
* kernel crypto infrastructure and let the system
* continue to boot up
* We have to wait this long because some crypto
* operations can take a while
*/
dev_dbg(&sctx->sep_used->pdev->dev,
"waiting for done with transaction\n");
sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ);
while ((time_before(jiffies, sctx->end_time)) &&
(!sctx->done_with_transaction))
schedule(); schedule();
dev_dbg(&sctx->sep_used->pdev->dev, /* Done waiting; still not done yet? */
"done waiting for done with transaction\n"); if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* are we done? */ "hash init never got done\n");
if (!sctx->done_with_transaction) { sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* Nope, lets release and tell crypto no */ return;
dev_warn(&sctx->sep_used->pdev->dev,
"[PID%d] sep_hash_init never finished\n",
current->pid);
sep_crypto_release(sctx, -EINVAL);
} }
} }
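Every submission path above then parks in a bounded poll: it stamps a deadline of jiffies plus a fixed number of seconds and calls schedule() until the per-request done flag flips, releasing with -EINVAL if it never does. A hedged sketch of that wait pattern; WAIT_TIME is an assumed value here and the flag is reduced to a plain int:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#define WAIT_TIME 10	/* seconds to wait; an assumption for this sketch */

/* Poll *done until it becomes non-zero or the deadline passes.
 * Returns 0 on completion, -ETIMEDOUT if the deadline expired. */
static int sep_wait_done(volatile int *done)
{
	unsigned long end_time = jiffies + (WAIT_TIME * HZ);

	while (time_before(jiffies, end_time) && (*done == 0))
		schedule();

	return *done ? 0 : -ETIMEDOUT;
}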
static void sep_hash_update(void *data) static void sep_hash_update(void *data)
@@ -1975,6 +2206,8 @@ static void sep_hash_update(void *data)
u32 block_size; u32 block_size;
u32 head_len; u32 head_len;
u32 tail_len; u32 tail_len;
int are_we_done_yet;
static u32 msg[10]; static u32 msg[10];
static char small_buf[100]; static char small_buf[100];
void *src_ptr; void *src_ptr;
@@ -1982,184 +2215,174 @@ static void sep_hash_update(void *data)
ssize_t copy_result; ssize_t copy_result;
struct ahash_request *req; struct ahash_request *req;
struct crypto_ahash *tfm; struct crypto_ahash *tfm;
struct sep_hash_ctx *ctx; struct this_task_ctx *ta_ctx;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
unsigned long end_time;
req = (struct ahash_request *)data; req = (struct ahash_request *)data;
tfm = crypto_ahash_reqtfm(req); tfm = crypto_ahash_reqtfm(req);
ctx = ahash_request_ctx(req);
sctx = crypto_ahash_ctx(tfm); sctx = crypto_ahash_ctx(tfm);
ta_ctx = ahash_request_ctx(req);
ta_ctx->sep_used = sep_dev;
ta_ctx->are_we_done_yet = &are_we_done_yet;
/* length for queue status */ /* length for queue status */
sctx->nbytes = req->nbytes; ta_ctx->nbytes = req->nbytes;
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep_hash_update\n"); "sep_hash_update\n");
sctx->current_hash_stage = HASH_UPDATE; ta_ctx->current_hash_stage = HASH_UPDATE;
len = req->nbytes; len = req->nbytes;
block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
tail_len = req->nbytes % block_size; tail_len = req->nbytes % block_size;
dev_dbg(&sctx->sep_used->pdev->dev, "length is %x\n", len); dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
dev_dbg(&sctx->sep_used->pdev->dev, "block_size is %x\n", block_size); dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
dev_dbg(&sctx->sep_used->pdev->dev, "tail len is %x\n", tail_len); dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
/* Compute header/tail sizes */ /* Compute header/tail sizes */
int_ctx = (struct sep_hash_internal_context *)&ctx-> int_ctx = (struct sep_hash_internal_context *)&sctx->
hash_private_ctx.internal_context; hash_private_ctx.internal_context;
head_len = (block_size - int_ctx->prev_update_bytes) % block_size; head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
tail_len = (req->nbytes - head_len) % block_size; tail_len = (req->nbytes - head_len) % block_size;
/* Make sure all pages are even block */ /* Make sure all pages are even block */
int_error = sep_oddball_pages(sctx->sep_used, req->src, int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
req->nbytes, req->nbytes,
block_size, &new_sg, 1); block_size, &new_sg, 1);
if (int_error < 0) { if (int_error < 0) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"oddball pages error in crash update\n"); "oddball pages error in crash update\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx, -ENOMEM);
return; return;
} else if (int_error == 1) { } else if (int_error == 1) {
sctx->src_sg = new_sg; ta_ctx->src_sg = new_sg;
sctx->src_sg_hold = new_sg; ta_ctx->src_sg_hold = new_sg;
} else { } else {
sctx->src_sg = req->src; ta_ctx->src_sg = req->src;
sctx->src_sg_hold = NULL; ta_ctx->src_sg_hold = NULL;
} }
src_ptr = sg_virt(sctx->src_sg); src_ptr = sg_virt(ta_ctx->src_sg);
if ((!req->nbytes) || (!ctx->sg)) { if ((!req->nbytes) || (!ta_ctx->src_sg)) {
/* null data */ /* null data */
src_ptr = NULL; src_ptr = NULL;
} }
sep_dump_sg(sctx->sep_used, "hash block sg in", sctx->src_sg); sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
sctx->dcb_input_data.app_in_address = src_ptr; ta_ctx->dcb_input_data.app_in_address = src_ptr;
sctx->dcb_input_data.data_in_size = req->nbytes - (head_len + tail_len); ta_ctx->dcb_input_data.data_in_size =
sctx->dcb_input_data.app_out_address = NULL; req->nbytes - (head_len + tail_len);
sctx->dcb_input_data.block_size = block_size; ta_ctx->dcb_input_data.app_out_address = NULL;
sctx->dcb_input_data.tail_block_size = 0; ta_ctx->dcb_input_data.block_size = block_size;
sctx->dcb_input_data.is_applet = 0; ta_ctx->dcb_input_data.tail_block_size = 0;
sctx->dcb_input_data.src_sg = sctx->src_sg; ta_ctx->dcb_input_data.is_applet = 0;
sctx->dcb_input_data.dst_sg = NULL; ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
ta_ctx->dcb_input_data.dst_sg = NULL;
int_error = sep_create_dcb_dmatables_context_kernel( int_error = sep_create_dcb_dmatables_context_kernel(
sctx->sep_used, ta_ctx->sep_used,
&sctx->dcb_region, &ta_ctx->dcb_region,
&sctx->dmatables_region, &ta_ctx->dmatables_region,
&sctx->dma_ctx, &ta_ctx->dma_ctx,
&sctx->dcb_input_data, &ta_ctx->dcb_input_data,
1); 1);
if (int_error) { if (int_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"hash update dma table create failed\n"); "hash update dma table create failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
return; return;
} }
/* Construct message to SEP */ /* Construct message to SEP */
sep_make_header(sctx, &msg_offset, SEP_HASH_UPDATE_OPCODE); sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
msg[0] = (u32)0; msg[0] = (u32)0;
msg[1] = (u32)0; msg[1] = (u32)0;
msg[2] = (u32)0; msg[2] = (u32)0;
sep_write_msg(sctx, msg, sizeof(u32) * 3, sizeof(u32) * 3, sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
&msg_offset, 0); &msg_offset, 0);
/* Handle remainders */ /* Handle remainders */
/* Head */ /* Head */
sep_write_msg(sctx, &head_len, sizeof(u32), sep_write_msg(ta_ctx, &head_len, sizeof(u32),
sizeof(u32), &msg_offset, 0); sizeof(u32), &msg_offset, 0);
if (head_len) { if (head_len) {
copy_result = sg_copy_to_buffer( copy_result = sg_copy_to_buffer(
req->src, req->src,
sep_sg_nents(sctx->src_sg), sep_sg_nents(ta_ctx->src_sg),
small_buf, head_len); small_buf, head_len);
if (copy_result != head_len) { if (copy_result != head_len) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sg head copy failure in hash block\n"); "sg head copy failure in hash block\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx, -ENOMEM);
return; return;
} }
sep_write_msg(sctx, small_buf, head_len, sep_write_msg(ta_ctx, small_buf, head_len,
sizeof(u32) * 32, &msg_offset, 1); sizeof(u32) * 32, &msg_offset, 1);
} else { } else {
msg_offset += sizeof(u32) * 32; msg_offset += sizeof(u32) * 32;
} }
/* Tail */ /* Tail */
sep_write_msg(sctx, &tail_len, sizeof(u32), sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
sizeof(u32), &msg_offset, 0); sizeof(u32), &msg_offset, 0);
if (tail_len) { if (tail_len) {
copy_result = sep_copy_offset_sg( copy_result = sep_copy_offset_sg(
sctx->sep_used, ta_ctx->sep_used,
sctx->src_sg, ta_ctx->src_sg,
req->nbytes - tail_len, req->nbytes - tail_len,
small_buf, tail_len); small_buf, tail_len);
if (copy_result != tail_len) { if (copy_result != tail_len) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sg tail copy failure in hash block\n"); "sg tail copy failure in hash block\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx, -ENOMEM);
return; return;
} }
sep_write_msg(sctx, small_buf, tail_len, sep_write_msg(ta_ctx, small_buf, tail_len,
sizeof(u32) * 32, &msg_offset, 1); sizeof(u32) * 32, &msg_offset, 1);
} else { } else {
msg_offset += sizeof(u32) * 32; msg_offset += sizeof(u32) * 32;
} }
/* Context */ /* Context */
sep_write_context(sctx, &msg_offset, &ctx->hash_private_ctx, sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
sizeof(struct sep_hash_private_context)); sizeof(struct sep_hash_private_context));
sep_end_msg(sctx, msg_offset); sep_end_msg(ta_ctx, msg_offset);
sctx->done_with_transaction = 0; are_we_done_yet = 0;
int_error = sep_crypto_take_sep(sctx); int_error = sep_crypto_take_sep(ta_ctx);
if (int_error) { if (int_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sep_hash_update take sep failed\n"); "sep_hash_update take sep failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
} }
/** /* now we sit and wait up to a fixed time for completion */
* Sep is now working. Lets wait up to 5 seconds end_time = jiffies + (WAIT_TIME * HZ);
* for completion. If it does not complete, we will do while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
* a crypto release with -EINVAL to release the
* kernel crypto infrastructure and let the system
* continue to boot up
* We have to wait this long because some crypto
* operations can take a while
*/
dev_dbg(&sctx->sep_used->pdev->dev,
"waiting for done with transaction\n");
sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ);
while ((time_before(jiffies, sctx->end_time)) &&
(!sctx->done_with_transaction))
schedule(); schedule();
dev_dbg(&sctx->sep_used->pdev->dev, /* Done waiting; still not done yet? */
"done waiting for done with transaction\n"); if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* are we done? */ "hash update never got done\n");
if (!sctx->done_with_transaction) { sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* Nope, lets release and tell crypto no */ return;
dev_warn(&sctx->sep_used->pdev->dev,
"[PID%d] sep_hash_update never finished\n",
current->pid);
sep_crypto_release(sctx, -EINVAL);
} }
} }
static void sep_hash_final(void *data) static void sep_hash_final(void *data)
@@ -2167,63 +2390,53 @@ static void sep_hash_final(void *data)
u32 msg_offset; u32 msg_offset;
struct ahash_request *req; struct ahash_request *req;
struct crypto_ahash *tfm; struct crypto_ahash *tfm;
struct sep_hash_ctx *ctx; struct this_task_ctx *ta_ctx;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
int result; int result;
unsigned long end_time;
int are_we_done_yet;
req = (struct ahash_request *)data; req = (struct ahash_request *)data;
tfm = crypto_ahash_reqtfm(req); tfm = crypto_ahash_reqtfm(req);
ctx = ahash_request_ctx(req);
sctx = crypto_ahash_ctx(tfm); sctx = crypto_ahash_ctx(tfm);
ta_ctx = ahash_request_ctx(req);
ta_ctx->sep_used = sep_dev;
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep_hash_final\n"); "sep_hash_final\n");
sctx->current_hash_stage = HASH_FINISH; ta_ctx->current_hash_stage = HASH_FINISH;
ta_ctx->are_we_done_yet = &are_we_done_yet;
/* opcode and mode */ /* opcode and mode */
sep_make_header(sctx, &msg_offset, SEP_HASH_FINISH_OPCODE); sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
/* Context */ /* Context */
sep_write_context(sctx, &msg_offset, &ctx->hash_private_ctx, sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
sizeof(struct sep_hash_private_context)); sizeof(struct sep_hash_private_context));
sep_end_msg(sctx, msg_offset); sep_end_msg(ta_ctx, msg_offset);
sctx->done_with_transaction = 0; are_we_done_yet = 0;
result = sep_crypto_take_sep(sctx); result = sep_crypto_take_sep(ta_ctx);
if (result) { if (result) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sep_hash_final take sep failed\n"); "sep_hash_final take sep failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
} }
/** /* now we sit and wait up to a fixed time for completion */
* Sep is now working. Lets wait up to 5 seconds end_time = jiffies + (WAIT_TIME * HZ);
* for completion. If it does not complete, we will do while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
* a crypto release with -EINVAL to release the
* kernel crypto infrastructure and let the system
* continue to boot up
* We have to wait this long because some crypto
* operations can take a while
*/
dev_dbg(&sctx->sep_used->pdev->dev,
"waiting for done with transaction\n");
sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ);
while ((time_before(jiffies, sctx->end_time)) &&
(!sctx->done_with_transaction))
schedule(); schedule();
dev_dbg(&sctx->sep_used->pdev->dev, /* Done waiting; still not done yet? */
"done waiting for done with transaction\n"); if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* are we done? */ "hash final job never got done\n");
if (!sctx->done_with_transaction) { sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* Nope, lets release and tell crypto no */ return;
dev_warn(&sctx->sep_used->pdev->dev,
"[PID%d] sep_hash_final never finished\n",
current->pid);
sep_crypto_release(sctx, -EINVAL);
} }
} }
static void sep_hash_digest(void *data) static void sep_hash_digest(void *data)
@@ -2234,6 +2447,7 @@ static void sep_hash_digest(void *data)
u32 msg[10]; u32 msg[10];
size_t copy_result; size_t copy_result;
int result; int result;
int are_we_done_yet;
u32 tail_len; u32 tail_len;
static char small_buf[100]; static char small_buf[100];
struct scatterlist *new_sg; struct scatterlist *new_sg;
@@ -2241,152 +2455,140 @@ static void sep_hash_digest(void *data)
struct ahash_request *req; struct ahash_request *req;
struct crypto_ahash *tfm; struct crypto_ahash *tfm;
struct sep_hash_ctx *ctx; struct this_task_ctx *ta_ctx;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
unsigned long end_time;
req = (struct ahash_request *)data; req = (struct ahash_request *)data;
tfm = crypto_ahash_reqtfm(req); tfm = crypto_ahash_reqtfm(req);
ctx = ahash_request_ctx(req);
sctx = crypto_ahash_ctx(tfm); sctx = crypto_ahash_ctx(tfm);
ta_ctx = ahash_request_ctx(req);
ta_ctx->sep_used = sep_dev;
dev_dbg(&sctx->sep_used->pdev->dev, dev_dbg(&ta_ctx->sep_used->pdev->dev,
"sep_hash_digest\n"); "sep_hash_digest\n");
sctx->current_hash_stage = HASH_DIGEST; ta_ctx->current_hash_stage = HASH_DIGEST;
ta_ctx->are_we_done_yet = &are_we_done_yet;
/* length for queue status */ /* length for queue status */
sctx->nbytes = req->nbytes; ta_ctx->nbytes = req->nbytes;
block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
tail_len = req->nbytes % block_size; tail_len = req->nbytes % block_size;
dev_dbg(&sctx->sep_used->pdev->dev, "length is %x\n", req->nbytes); dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
dev_dbg(&sctx->sep_used->pdev->dev, "block_size is %x\n", block_size); dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
dev_dbg(&sctx->sep_used->pdev->dev, "tail len is %x\n", tail_len); dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
/* Make sure all pages are even block */ /* Make sure all pages are even block */
int_error = sep_oddball_pages(sctx->sep_used, req->src, int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
req->nbytes, req->nbytes,
block_size, &new_sg, 1); block_size, &new_sg, 1);
if (int_error < 0) { if (int_error < 0) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"oddball pages error in crash update\n"); "oddball pages error in crash update\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx, -ENOMEM);
return; return;
} else if (int_error == 1) { } else if (int_error == 1) {
sctx->src_sg = new_sg; ta_ctx->src_sg = new_sg;
sctx->src_sg_hold = new_sg; ta_ctx->src_sg_hold = new_sg;
} else { } else {
sctx->src_sg = req->src; ta_ctx->src_sg = req->src;
sctx->src_sg_hold = NULL; ta_ctx->src_sg_hold = NULL;
} }
src_ptr = sg_virt(sctx->src_sg); src_ptr = sg_virt(ta_ctx->src_sg);
if ((!req->nbytes) || (!ctx->sg)) { if ((!req->nbytes) || (!ta_ctx->src_sg)) {
/* null data */ /* null data */
src_ptr = NULL; src_ptr = NULL;
} }
sep_dump_sg(sctx->sep_used, "hash block sg in", sctx->src_sg); sep_dump_sg(ta_ctx->sep_used, "hash block sg in", ta_ctx->src_sg);
sctx->dcb_input_data.app_in_address = src_ptr; ta_ctx->dcb_input_data.app_in_address = src_ptr;
sctx->dcb_input_data.data_in_size = req->nbytes - tail_len; ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
sctx->dcb_input_data.app_out_address = NULL; ta_ctx->dcb_input_data.app_out_address = NULL;
sctx->dcb_input_data.block_size = block_size; ta_ctx->dcb_input_data.block_size = block_size;
sctx->dcb_input_data.tail_block_size = 0; ta_ctx->dcb_input_data.tail_block_size = 0;
sctx->dcb_input_data.is_applet = 0; ta_ctx->dcb_input_data.is_applet = 0;
sctx->dcb_input_data.src_sg = sctx->src_sg; ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
sctx->dcb_input_data.dst_sg = NULL; ta_ctx->dcb_input_data.dst_sg = NULL;
int_error = sep_create_dcb_dmatables_context_kernel( int_error = sep_create_dcb_dmatables_context_kernel(
sctx->sep_used, ta_ctx->sep_used,
&sctx->dcb_region, &ta_ctx->dcb_region,
&sctx->dmatables_region, &ta_ctx->dmatables_region,
&sctx->dma_ctx, &ta_ctx->dma_ctx,
&sctx->dcb_input_data, &ta_ctx->dcb_input_data,
1); 1);
if (int_error) { if (int_error) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"hash update dma table create failed\n"); "hash update dma table create failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
return; return;
} }
/* Construct message to SEP */ /* Construct message to SEP */
sep_make_header(sctx, &msg_offset, SEP_HASH_SINGLE_OPCODE); sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
sep_write_msg(sctx, &ctx->hash_opmode, sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
sizeof(u32), sizeof(u32), &msg_offset, 0); sizeof(u32), sizeof(u32), &msg_offset, 0);
msg[0] = (u32)0; msg[0] = (u32)0;
msg[1] = (u32)0; msg[1] = (u32)0;
msg[2] = (u32)0; msg[2] = (u32)0;
sep_write_msg(sctx, msg, sizeof(u32) * 3, sizeof(u32) * 3, sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
&msg_offset, 0); &msg_offset, 0);
/* Tail */ /* Tail */
sep_write_msg(sctx, &tail_len, sizeof(u32), sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
sizeof(u32), &msg_offset, 0); sizeof(u32), &msg_offset, 0);
if (tail_len) { if (tail_len) {
copy_result = sep_copy_offset_sg( copy_result = sep_copy_offset_sg(
sctx->sep_used, ta_ctx->sep_used,
sctx->src_sg, ta_ctx->src_sg,
req->nbytes - tail_len, req->nbytes - tail_len,
small_buf, tail_len); small_buf, tail_len);
if (copy_result != tail_len) { if (copy_result != tail_len) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sg tail copy failure in hash block\n"); "sg tail copy failure in hash block\n");
sep_crypto_release(sctx, -ENOMEM); sep_crypto_release(sctx, ta_ctx, -ENOMEM);
return; return;
} }
sep_write_msg(sctx, small_buf, tail_len, sep_write_msg(ta_ctx, small_buf, tail_len,
sizeof(u32) * 32, &msg_offset, 1); sizeof(u32) * 32, &msg_offset, 1);
} else { } else {
msg_offset += sizeof(u32) * 32; msg_offset += sizeof(u32) * 32;
} }
sep_end_msg(sctx, msg_offset); sep_end_msg(ta_ctx, msg_offset);
sctx->done_with_transaction = 0; are_we_done_yet = 0;
result = sep_crypto_take_sep(ta_ctx);
result = sep_crypto_take_sep(sctx);
if (result) { if (result) {
dev_warn(&sctx->sep_used->pdev->dev, dev_warn(&ta_ctx->sep_used->pdev->dev,
"sep_hash_digest take sep failed\n"); "sep_hash_digest take sep failed\n");
sep_crypto_release(sctx, -EINVAL); sep_crypto_release(sctx, ta_ctx, -EINVAL);
} }
/** /* now we sit and wait up to a fixed time for completion */
* Sep is now working. Lets wait up to 5 seconds end_time = jiffies + (WAIT_TIME * HZ);
* for completion. If it does not complete, we will do while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
* a crypto release with -EINVAL to release the
* kernel crypto infrastructure and let the system
* continue to boot up
* We have to wait this long because some crypto
* operations can take a while
*/
dev_dbg(&sctx->sep_used->pdev->dev,
"waiting for done with transaction\n");
sctx->end_time = jiffies + (SEP_TRANSACTION_WAIT_TIME * HZ);
while ((time_before(jiffies, sctx->end_time)) &&
(!sctx->done_with_transaction))
schedule(); schedule();
dev_dbg(&sctx->sep_used->pdev->dev, /* Done waiting; still not done yet? */
"done waiting for done with transaction\n"); if (are_we_done_yet == 0) {
dev_dbg(&ta_ctx->sep_used->pdev->dev,
/* are we done? */ "hash digest job never got done\n");
if (!sctx->done_with_transaction) { sep_crypto_release(sctx, ta_ctx, -EINVAL);
/* Nope, lets release and tell crypto no */ return;
dev_warn(&sctx->sep_used->pdev->dev,
"[PID%d] sep_hash_digest never finished\n",
current->pid);
sep_crypto_release(sctx, -EINVAL);
} }
} }
/** /**
@@ -2404,6 +2606,7 @@ static void sep_dequeuer(void *data)
struct ahash_request *hash_req; struct ahash_request *hash_req;
struct sep_system_ctx *sctx; struct sep_system_ctx *sctx;
struct crypto_ahash *hash_tfm; struct crypto_ahash *hash_tfm;
struct this_task_ctx *ta_ctx;
this_queue = (struct crypto_queue *)data; this_queue = (struct crypto_queue *)data;
@@ -2481,22 +2684,32 @@ static void sep_dequeuer(void *data)
return; return;
} }
if (sctx->current_hash_stage == HASH_INIT) { ta_ctx = ahash_request_ctx(hash_req);
if (ta_ctx->current_hash_stage == HASH_INIT) {
pr_debug("sep crypto queue hash init\n"); pr_debug("sep crypto queue hash init\n");
sep_hash_init((void *)hash_req); sep_hash_init((void *)hash_req);
return; return;
} else if (sctx->current_hash_stage == HASH_UPDATE) { } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
pr_debug("sep crypto queue hash update\n"); pr_debug("sep crypto queue hash update\n");
sep_hash_update((void *)hash_req); sep_hash_update((void *)hash_req);
return; return;
} else if (sctx->current_hash_stage == HASH_FINISH) { } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
pr_debug("sep crypto queue hash final\n"); pr_debug("sep crypto queue hash final\n");
sep_hash_final((void *)hash_req); sep_hash_final((void *)hash_req);
return; return;
} else if (sctx->current_hash_stage == HASH_DIGEST) { } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
pr_debug("sep crypto queue hash digest\n"); pr_debug("sep crypto queue hash digest\n");
sep_hash_digest((void *)hash_req); sep_hash_digest((void *)hash_req);
return; return;
} else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
pr_debug("sep crypto queue hash digest\n");
sep_hash_update((void *)hash_req);
return;
} else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
pr_debug("sep crypto queue hash digest\n");
sep_hash_final((void *)hash_req);
return;
} else { } else {
pr_debug("sep crypto queue hash oops nothing\n"); pr_debug("sep crypto queue hash oops nothing\n");
return; return;
@@ -2507,605 +2720,671 @@ static void sep_dequeuer(void *data)
static int sep_sha1_init(struct ahash_request *req) static int sep_sha1_init(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
pr_debug("sep - doing sha1 init\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 init\n"); /* Clear out task context */
sctx->current_request = SHA1; memset(ta_ctx, 0, sizeof(struct this_task_ctx));
sctx->current_hash_req = req;
sctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_SHA1;
sctx->current_hash_stage = HASH_INIT;
ta_ctx->sep_used = sep_dev;
ta_ctx->current_request = SHA1;
ta_ctx->current_hash_req = req;
ta_ctx->current_cypher_req = NULL;
ta_ctx->hash_opmode = SEP_HASH_SHA1;
ta_ctx->current_hash_stage = HASH_INIT;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep sha1 init cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, if ((error != 0) && (error != -EINPROGRESS))
(void *)&sep_queue); pr_debug(" sep - crypto enqueue failed: %x\n",
if (error) { error);
dev_warn(&sctx->sep_used->pdev->dev, error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
"sha1 init cannot submit queue\n"); sep_dequeuer, (void *)&sep_queue);
sep_crypto_release(sctx, -EINVAL); if (error1)
return -EINVAL; pr_debug(" sep - workqueue submit failed: %x\n",
} error1);
return -EINPROGRESS; spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_sha1_update(struct ahash_request *req) static int sep_sha1_update(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 update\n"); pr_debug("sep - doing sha1 update\n");
sctx->current_request = SHA1;
sctx->current_hash_req = req;
sctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_SHA1;
sctx->current_hash_stage = HASH_INIT;
ta_ctx->sep_used = sep_dev;
ta_ctx->current_request = SHA1;
ta_ctx->current_hash_req = req;
ta_ctx->current_cypher_req = NULL;
ta_ctx->hash_opmode = SEP_HASH_SHA1;
ta_ctx->current_hash_stage = HASH_UPDATE;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep sha1 update cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, if ((error != 0) && (error != -EINPROGRESS))
(void *)&sep_queue); pr_debug(" sep - crypto enqueue failed: %x\n",
if (error) { error);
dev_warn(&sctx->sep_used->pdev->dev, error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
"sha1 update cannot submit queue\n"); sep_dequeuer, (void *)&sep_queue);
sep_crypto_release(sctx, -EINVAL); if (error1)
return -EINVAL; pr_debug(" sep - workqueue submit failed: %x\n",
} error1);
return -EINPROGRESS; spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_sha1_final(struct ahash_request *req) static int sep_sha1_final(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing sha1 final\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 final\n");
ta_ctx->sep_used = sep_dev;
sctx->current_request = SHA1; ta_ctx->current_request = SHA1;
sctx->current_hash_req = req; ta_ctx->current_hash_req = req;
sctx->current_cypher_req = NULL; ta_ctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_SHA1; ta_ctx->hash_opmode = SEP_HASH_SHA1;
sctx->current_hash_stage = HASH_FINISH; ta_ctx->current_hash_stage = HASH_FINISH;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep sha1 final cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer,
(void *)&sep_queue);
if (error) {
dev_warn(&sctx->sep_used->pdev->dev,
"sha1 final cannot submit queue\n");
sep_crypto_release(sctx, -EINVAL);
return -EINVAL;
}
return -EINPROGRESS;
if ((error != 0) && (error != -EINPROGRESS))
pr_debug(" sep - crypto enqueue failed: %x\n",
error);
error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_sha1_digest(struct ahash_request *req) static int sep_sha1_digest(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing sha1 digest\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha1 digest\n");
sctx->current_request = SHA1; /* Clear out task context */
sctx->current_hash_req = req; memset(ta_ctx, 0, sizeof(struct this_task_ctx));
sctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_SHA1;
sctx->current_hash_stage = HASH_DIGEST;
ta_ctx->sep_used = sep_dev;
ta_ctx->current_request = SHA1;
ta_ctx->current_hash_req = req;
ta_ctx->current_cypher_req = NULL;
ta_ctx->hash_opmode = SEP_HASH_SHA1;
ta_ctx->current_hash_stage = HASH_DIGEST;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) { if ((error != 0) && (error != -EINPROGRESS))
dev_warn(&sctx->sep_used->pdev->dev, pr_debug(" sep - crypto enqueue failed: %x\n",
"sep sha1 digest cant enqueue\n"); error);
sep_crypto_release(sctx, error); error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
return error; sep_dequeuer, (void *)&sep_queue);
} if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, static int sep_sha1_finup(struct ahash_request *req)
(void *)&sep_queue); {
if (error) { int error;
dev_warn(&sctx->sep_used->pdev->dev, int error1;
"sha1 digest cannot submit queue\n"); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
sep_crypto_release(sctx, -EINVAL); pr_debug("sep - doing sha1 finup\n");
return -EINVAL;
} ta_ctx->sep_used = sep_dev;
return -EINPROGRESS; ta_ctx->current_request = SHA1;
ta_ctx->current_hash_req = req;
ta_ctx->current_cypher_req = NULL;
ta_ctx->hash_opmode = SEP_HASH_SHA1;
ta_ctx->current_hash_stage = HASH_FINUP_DATA;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base);
if ((error != 0) && (error != -EINPROGRESS))
pr_debug(" sep - crypto enqueue failed: %x\n",
error);
error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
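With the HASH_FINUP_DATA stage wired into the queue handlers, the new .finup entry point is reachable through the ordinary ahash interface. A hedged usage sketch from kernel code, with error handling trimmed; an asynchronous driver such as this one may return -EINPROGRESS, in which case a real caller would install a completion callback before submitting:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sha1_finup_example(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_init(req);
	if (!err)
		err = crypto_ahash_finup(req);	/* hash remaining data and finalize */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}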
static int sep_md5_init(struct ahash_request *req) static int sep_md5_init(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing md5 init\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 init\n");
/* Clear out task context */
memset(ta_ctx, 0, sizeof(struct this_task_ctx));
sctx->current_request = MD5; ta_ctx->sep_used = sep_dev;
sctx->current_hash_req = req; ta_ctx->current_request = MD5;
sctx->current_cypher_req = NULL; ta_ctx->current_hash_req = req;
ctx->hash_opmode = SEP_HASH_MD5; ta_ctx->current_cypher_req = NULL;
sctx->current_hash_stage = HASH_INIT; ta_ctx->hash_opmode = SEP_HASH_MD5;
ta_ctx->current_hash_stage = HASH_INIT;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep md5 init cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer,
(void *)&sep_queue);
if (error) {
dev_warn(&sctx->sep_used->pdev->dev,
"md5 init cannot submit queue\n");
sep_crypto_release(sctx, -EINVAL);
return -EINVAL;
}
return -EINPROGRESS;
if ((error != 0) && (error != -EINPROGRESS))
pr_debug(" sep - crypto enqueue failed: %x\n",
error);
error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_md5_update(struct ahash_request *req) static int sep_md5_update(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing md5 update\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 update\n");
ta_ctx->sep_used = sep_dev;
sctx->current_request = MD5; ta_ctx->current_request = MD5;
sctx->current_hash_req = req; ta_ctx->current_hash_req = req;
sctx->current_cypher_req = NULL; ta_ctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_MD5; ta_ctx->hash_opmode = SEP_HASH_MD5;
sctx->current_hash_stage = HASH_UPDATE; ta_ctx->current_hash_stage = HASH_UPDATE;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) { if ((error != 0) && (error != -EINPROGRESS))
dev_warn(&sctx->sep_used->pdev->dev, pr_debug(" sep - crypto enqueue failed: %x\n",
"md5 update cant enqueue\n"); error);
sep_crypto_release(sctx, error); error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
return error; sep_dequeuer, (void *)&sep_queue);
} if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, error1);
(void *)&sep_queue); spin_unlock_irq(&queue_lock);
if (error) { /* We return result of crypto enqueue */
dev_warn(&sctx->sep_used->pdev->dev, return error;
"md5 update cannot submit queue\n");
sep_crypto_release(sctx, -EINVAL);
return -EINVAL;
}
return -EINPROGRESS;
} }
static int sep_md5_final(struct ahash_request *req) static int sep_md5_final(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing md5 final\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 final\n");
ta_ctx->sep_used = sep_dev;
sctx->current_request = MD5; ta_ctx->current_request = MD5;
sctx->current_hash_req = req; ta_ctx->current_hash_req = req;
sctx->current_cypher_req = NULL; ta_ctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_MD5; ta_ctx->hash_opmode = SEP_HASH_MD5;
sctx->current_hash_stage = HASH_FINISH; ta_ctx->current_hash_stage = HASH_FINISH;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep md5 final cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer,
(void *)&sep_queue);
if (error) {
dev_warn(&sctx->sep_used->pdev->dev,
"md5 final cannot submit queue\n");
sep_crypto_release(sctx, -EINVAL);
return -EINVAL;
}
return -EINPROGRESS;
if ((error != 0) && (error != -EINPROGRESS))
pr_debug(" sep - crypto enqueue failed: %x\n",
error);
error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_md5_digest(struct ahash_request *req) static int sep_md5_digest(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, "doing md5 digest\n"); pr_debug("sep - doing md5 digest\n");
sctx->current_request = MD5;
sctx->current_hash_req = req;
sctx->current_cypher_req = NULL;
ctx->hash_opmode = SEP_HASH_MD5;
sctx->current_hash_stage = HASH_DIGEST;
/* Clear out task context */
memset(ta_ctx, 0, sizeof(struct this_task_ctx));
ta_ctx->sep_used = sep_dev;
ta_ctx->current_request = MD5;
ta_ctx->current_hash_req = req;
ta_ctx->current_cypher_req = NULL;
ta_ctx->hash_opmode = SEP_HASH_MD5;
ta_ctx->current_hash_stage = HASH_DIGEST;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep md5 digest cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, if ((error != 0) && (error != -EINPROGRESS))
(void *)&sep_queue); pr_debug(" sep - crypto enqueue failed: %x\n",
if (error) { error);
dev_warn(&sctx->sep_used->pdev->dev, error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
"md5 digest cannot submit queue\n"); sep_dequeuer, (void *)&sep_queue);
sep_crypto_release(sctx, -EINVAL); if (error1)
return -EINVAL; pr_debug(" sep - workqueue submit failed: %x\n",
} error1);
return -EINPROGRESS; spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_sha224_init(struct ahash_request *req) static int sep_md5_finup(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha224 init\n"); pr_debug("sep - doing md5 finup\n");
sctx->current_request = SHA224; ta_ctx->sep_used = sep_dev;
sctx->current_hash_req = req; ta_ctx->current_request = MD5;
sctx->current_cypher_req = NULL; ta_ctx->current_hash_req = req;
ctx->hash_opmode = SEP_HASH_SHA224; ta_ctx->current_cypher_req = NULL;
sctx->current_hash_stage = HASH_INIT; ta_ctx->hash_opmode = SEP_HASH_MD5;
ta_ctx->current_hash_stage = HASH_FINUP_DATA;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
spin_unlock_irq(&queue_lock);
if ((error != 0) && (error != -EINPROGRESS)) {
dev_warn(&sctx->sep_used->pdev->dev,
"sep sha224 init cant enqueue\n");
sep_crypto_release(sctx, error);
return error;
}
error = sep_submit_work(sctx->sep_used->workqueue, sep_dequeuer, if ((error != 0) && (error != -EINPROGRESS))
(void *)&sep_queue); pr_debug(" sep - crypto enqueue failed: %x\n",
if (error) { error);
dev_warn(&sctx->sep_used->pdev->dev, error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
"sha224 init cannot submit queue\n"); sep_dequeuer, (void *)&sep_queue);
sep_crypto_release(sctx, -EINVAL); if (error1)
return -EINVAL; pr_debug(" sep - workqueue submit failed: %x\n",
} error1);
return -EINPROGRESS; spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
} }
static int sep_sha224_update(struct ahash_request *req) static int sep_sha224_init(struct ahash_request *req)
{ {
int error; int error;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int error1;
struct sep_hash_ctx *ctx = ahash_request_ctx(req); struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm); pr_debug("sep - doing sha224 init\n");
dev_dbg(&sctx->sep_used->pdev->dev, "doing sha224 update\n");
/* Clear out task context */
memset(ta_ctx, 0, sizeof(struct this_task_ctx));
sctx->current_request = SHA224; ta_ctx->sep_used = sep_dev;
sctx->current_hash_req = req; ta_ctx->current_request = SHA224;
sctx->current_cypher_req = NULL; ta_ctx->current_hash_req = req;
ctx->hash_opmode = SEP_HASH_SHA224; ta_ctx->current_cypher_req = NULL;
sctx->current_hash_stage = HASH_UPDATE; ta_ctx->hash_opmode = SEP_HASH_SHA224;
ta_ctx->current_hash_stage = HASH_INIT;
/* lock necessary so that only one entity touches the queues */
spin_lock_irq(&queue_lock); spin_lock_irq(&queue_lock);
error = crypto_enqueue_request(&sep_queue, &req->base); error = crypto_enqueue_request(&sep_queue, &req->base);
if ((error != 0) && (error != -EINPROGRESS))
pr_debug(" sep - crypto enqueue failed: %x\n",
error);
error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
sep_dequeuer, (void *)&sep_queue);
if (error1)
pr_debug(" sep - workqueue submit failed: %x\n",
error1);
spin_unlock_irq(&queue_lock); spin_unlock_irq(&queue_lock);
/* We return result of crypto enqueue */
return error;
}
static int sep_sha224_update(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha224 update\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA224;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA224;
    ta_ctx->current_hash_stage = HASH_UPDATE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha224_final(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha224 final\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA224;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA224;
    ta_ctx->current_hash_stage = HASH_FINISH;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha224_digest(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha224 digest\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA224;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA224;
    ta_ctx->current_hash_stage = HASH_DIGEST;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha224_finup(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha224 finup\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA224;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA224;
    ta_ctx->current_hash_stage = HASH_FINUP_DATA;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha256_init(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha256 init\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA256;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA256;
    ta_ctx->current_hash_stage = HASH_INIT;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha256_update(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha256 update\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA256;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA256;
    ta_ctx->current_hash_stage = HASH_UPDATE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha256_final(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha256 final\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA256;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA256;
    ta_ctx->current_hash_stage = HASH_FINISH;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha256_digest(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha256 digest\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA256;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA256;
    ta_ctx->current_hash_stage = HASH_DIGEST;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_sha256_finup(struct ahash_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ahash_request_ctx(req);

    pr_debug("sep - doing sha256 finup\n");

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = SHA256;
    ta_ctx->current_hash_req = req;
    ta_ctx->current_cypher_req = NULL;
    ta_ctx->hash_opmode = SEP_HASH_SHA256;
    ta_ctx->current_hash_stage = HASH_FINUP_DATA;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_crypto_init(struct crypto_tfm *tfm)
{
    const char *alg_name = crypto_tfm_alg_name(tfm);

    if (alg_name == NULL)
        pr_debug("sep_crypto_init alg is NULL\n");
    else
        pr_debug("sep_crypto_init alg is %s\n", alg_name);

    tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
    return 0;
}

static void sep_crypto_exit(struct crypto_tfm *tfm)
{
    pr_debug("sep_crypto_exit\n");
}
static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -3113,8 +3392,9 @@ static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
    struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);

    pr_debug("sep aes setkey\n");

    pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
    switch (keylen) {
    case SEP_AES_KEY_128_SIZE:
        sctx->aes_key_size = AES_128;
@@ -3129,7 +3409,7 @@ static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
        sctx->aes_key_size = AES_512;
        break;
    default:
        pr_debug("invalid sep aes key size %x\n",
            keylen);
        return -EINVAL;
    }
@@ -3140,7 +3420,6 @@ static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
    sctx->keylen = keylen;
    /* Indicate to encrypt/decrypt function to send key to SEP */
    sctx->key_sent = 0;

    return 0;
}
@@ -3148,153 +3427,159 @@ static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,

static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing aes ecb encrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = AES_ECB;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
    ta_ctx->aes_opmode = SEP_AES_ECB;
    ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing aes ecb decrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = AES_ECB;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->aes_encmode = SEP_AES_DECRYPT;
    ta_ctx->aes_opmode = SEP_AES_ECB;
    ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
    struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
        crypto_ablkcipher_reqtfm(req));

    pr_debug("sep - doing aes cbc encrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
        crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = AES_CBC;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
    ta_ctx->aes_opmode = SEP_AES_CBC;
    ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
    struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
        crypto_ablkcipher_reqtfm(req));

    pr_debug("sep - doing aes cbc decrypt\n");

    pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
        crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = AES_CBC;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->aes_encmode = SEP_AES_DECRYPT;
    ta_ctx->aes_opmode = SEP_AES_CBC;
    ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -3304,7 +3589,7 @@ static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
    struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
    u32 *flags = &ctfm->crt_flags;

    pr_debug("sep des setkey\n");

    switch (keylen) {
    case DES_KEY_SIZE:
@@ -3317,7 +3602,7 @@ static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
        sctx->des_nbr_keys = DES_KEY_3;
        break;
    default:
        pr_debug("invalid key size %x\n",
            keylen);
        return -EINVAL;
    }
@@ -3326,7 +3611,7 @@ static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
        (sep_weak_key(key, keylen))) {

        *flags |= CRYPTO_TFM_RES_WEAK_KEY;
        pr_debug("weak key\n");
        return -EINVAL;
    }
@@ -3335,7 +3620,6 @@ static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
    sctx->keylen = keylen;
    /* Indicate to encrypt/decrypt function to send key to SEP */
    sctx->key_sent = 0;

    return 0;
}
@@ -3343,153 +3627,149 @@ static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,

static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing des ecb encrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = DES_ECB;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->des_encmode = SEP_DES_ENCRYPT;
    ta_ctx->des_opmode = SEP_DES_ECB;
    ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing des ecb decrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = DES_ECB;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->des_encmode = SEP_DES_DECRYPT;
    ta_ctx->des_opmode = SEP_DES_ECB;
    ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing des cbc encrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = DES_CBC;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->des_encmode = SEP_DES_ENCRYPT;
    ta_ctx->des_opmode = SEP_DES_CBC;
    ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
{
    int error;
    int error1;
    struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);

    pr_debug("sep - doing des ecb decrypt\n");

    /* Clear out task context */
    memset(ta_ctx, 0, sizeof(struct this_task_ctx));

    ta_ctx->sep_used = sep_dev;
    ta_ctx->current_request = DES_CBC;
    ta_ctx->current_hash_req = NULL;
    ta_ctx->current_cypher_req = req;
    ta_ctx->des_encmode = SEP_DES_DECRYPT;
    ta_ctx->des_opmode = SEP_DES_CBC;
    ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
    ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;

    /* lock necessary so that only one entity touches the queues */
    spin_lock_irq(&queue_lock);
    error = crypto_enqueue_request(&sep_queue, &req->base);
    if ((error != 0) && (error != -EINPROGRESS))
        pr_debug(" sep - crypto enqueue failed: %x\n",
            error);
    error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
        sep_dequeuer, (void *)&sep_queue);
    if (error1)
        pr_debug(" sep - workqueue submit failed: %x\n",
            error1);
    spin_unlock_irq(&queue_lock);
    /* We return result of crypto enqueue */
    return error;
}
static struct ahash_alg hash_algs[] = {
@@ -3498,6 +3778,7 @@ static struct ahash_alg hash_algs[] = {
    .update = sep_sha1_update,
    .final = sep_sha1_final,
    .digest = sep_sha1_digest,
    .finup = sep_sha1_finup,
    .halg = {
        .digestsize = SHA1_DIGEST_SIZE,
        .base = {
@@ -3520,6 +3801,7 @@ static struct ahash_alg hash_algs[] = {
    .update = sep_md5_update,
    .final = sep_md5_final,
    .digest = sep_md5_digest,
    .finup = sep_md5_finup,
    .halg = {
        .digestsize = MD5_DIGEST_SIZE,
        .base = {
@@ -3542,6 +3824,7 @@ static struct ahash_alg hash_algs[] = {
    .update = sep_sha224_update,
    .final = sep_sha224_final,
    .digest = sep_sha224_digest,
    .finup = sep_sha224_finup,
    .halg = {
        .digestsize = SHA224_DIGEST_SIZE,
        .base = {
@@ -3564,6 +3847,7 @@ static struct ahash_alg hash_algs[] = {
    .update = sep_sha256_update,
    .final = sep_sha256_final,
    .digest = sep_sha256_digest,
    .finup = sep_sha256_finup,
    .halg = {
        .digestsize = SHA256_DIGEST_SIZE,
        .base = {
@@ -3621,6 +3905,7 @@ static struct crypto_alg crypto_algs[] = {
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = sep_aes_setkey,
        .encrypt = sep_aes_cbc_encrypt,
        .ivsize = AES_BLOCK_SIZE,
        .decrypt = sep_aes_cbc_decrypt,
    }
},
@@ -3661,6 +3946,7 @@ static struct crypto_alg crypto_algs[] = {
        .max_keysize = DES_KEY_SIZE,
        .setkey = sep_des_setkey,
        .encrypt = sep_des_cbc_encrypt,
        .ivsize = DES_BLOCK_SIZE,
        .decrypt = sep_des_cbc_decrypt,
    }
},
@@ -3714,7 +4000,8 @@ int sep_crypto_setup(void)
    crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);

    sep_dev->workqueue = create_singlethread_workqueue(
        "sep_crypto_workqueue");
    if (!sep_dev->workqueue) {
        dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n");
        return -ENOMEM;
@@ -3723,7 +4010,6 @@ int sep_crypto_setup(void)
    i = 0;
    j = 0;

    spin_lock_init(&queue_lock);
    err = 0;
...
@@ -117,7 +117,7 @@
#define SEP_TRANSACTION_WAIT_TIME 5

#define SEP_QUEUE_LENGTH 2
/* Macros */
#ifndef __LITTLE_ENDIAN
#define CHG_ENDIAN(val) \
@@ -270,9 +270,26 @@ struct sep_hash_private_context {
    u8 internal_context[sizeof(struct sep_hash_internal_context)];
};

union key_t {
    struct sep_des_key des;
    u32 aes[SEP_AES_MAX_KEY_SIZE_WORDS];
};

/* Context structures for crypto API */

/**
 * Structure for this current task context
 * This same structure is used for both hash
 * and crypt in order to reduce duplicate code
 * for stuff that is done for both hash operations
 * and crypto operations. We cannot trust that the
 * system context is not pulled out from under
 * us during operation to operation, so all
 * critical stuff such as data pointers must
 * be in in a context that is exclusive for this
 * particular task at hand.
 */
struct this_task_ctx {
    struct sep_device *sep_used;
    u32 done;
    unsigned char iv[100];
    enum des_enc_mode des_encmode;
@@ -284,36 +301,7 @@ struct sep_block_ctx {
    size_t data_length;
    size_t ivlen;
    struct ablkcipher_walk walk;
    int i_own_sep; /* Do I have custody of the sep? */
    struct sep_call_status call_status;
    struct build_dcb_struct_kernel dcb_input_data;
    struct sep_dma_context *dma_ctx;
@@ -331,9 +319,32 @@ struct sep_system_ctx {
    struct ahash_request *current_hash_req;
    struct ablkcipher_request *current_cypher_req;
    enum type_of_request current_request;
    int digest_size_words;
    int digest_size_bytes;
    int block_size_words;
    int block_size_bytes;
    enum hash_op_mode hash_opmode;
    enum hash_stage current_hash_stage;
    /**
     * Not that this is a pointer. The are_we_done_yet variable is
     * allocated by the task function. This way, even if the kernel
     * crypto infrastructure has grabbed the task structure out from
     * under us, the task function can still see this variable.
     */
    int *are_we_done_yet;
    unsigned long end_time;
};

struct sep_system_ctx {
    union key_t key;
    size_t keylen;
    int key_sent;
    enum des_numkey des_nbr_keys;
    enum aes_keysize aes_key_size;
    unsigned long end_time;
    struct sep_des_private_context des_private_ctx;
    struct sep_aes_private_context aes_private_ctx;
    struct sep_hash_private_context hash_private_ctx;
};

/* work queue structures */
...
@@ -93,8 +93,7 @@ struct sep_device {
    enum hash_stage current_hash_stage;
    struct ahash_request *current_hash_req;
    struct ablkcipher_request *current_cypher_req;
    struct this_task_ctx *ta_ctx;
    struct workqueue_struct *workqueue;
};
...
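Every hash and cipher entry point in the reworked layer follows the same submission pattern: fill in the per-request this_task_ctx, enqueue the request on the driver's crypto_queue while holding queue_lock, kick the single-threaded workqueue that runs sep_dequeuer, and hand the enqueue result (normally -EINPROGRESS) back to the caller. The sketch below is a minimal, self-contained illustration of that pattern, not the driver's own code; the demo_* names are invented for the example, and the real driver wraps the workqueue kick in its sep_submit_work() helper.

/* Minimal sketch of the enqueue-then-kick submission pattern used by the
 * SEP entry points above.  All demo_* names are illustrative stand-ins.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/printk.h>
#include <crypto/algapi.h>

static struct crypto_queue demo_queue;      /* like sep_queue */
static DEFINE_SPINLOCK(demo_lock);          /* like queue_lock */
static struct workqueue_struct *demo_wq;    /* like sep_dev->workqueue */
static struct work_struct demo_work;        /* runs the dequeue/process step */

static int demo_submit(struct crypto_async_request *base)
{
    int error;

    /* Only one context may touch the request queue at a time */
    spin_lock_irq(&demo_lock);
    error = crypto_enqueue_request(&demo_queue, base);
    if (error != 0 && error != -EINPROGRESS)
        pr_debug("demo: crypto enqueue failed: %x\n", error);

    /* Kick the worker that will dequeue and process the request;
     * queue_work() may be called while the spinlock is held.
     */
    if (!queue_work(demo_wq, &demo_work))
        pr_debug("demo: work was already pending\n");
    spin_unlock_irq(&demo_lock);

    /* The async crypto API expects the enqueue result; completion is
     * reported later through the request's callback.
     */
    return error;
}

The queue and workqueue themselves would be set up once at module init, much as sep_crypto_setup() does with crypto_init_queue(), create_singlethread_workqueue() and an INIT_WORK() for the dequeue handler.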