Commit 4262c98a authored by Harsh Jain, committed by Herbert Xu

crypto: chelsio - Remove separate buffer used for DMA map B0 block in CCM

Extend the memory allocated for the IV to include the B0 block, so both
can be DMA-mapped in a single operation.
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent 335bcc4a
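In short: the request context's iv[] buffer is oversized so that the CCM B0 block (and its AAD length field) sits directly after the IV, letting one dma_map_single() cover both and letting the B0 DMA address be derived as an offset into the IV mapping instead of needing a second map/unmap pair. Below is a minimal sketch of that pattern; the struct, its field sizes, and the helper name are hypothetical stand-ins, not the driver's real context layout:

#include <linux/dma-mapping.h>
#include <linux/types.h>

#define IV 16			/* assumed size of the IV region */

/* Hypothetical request context: iv[] holds the IV plus room for B0. */
struct demo_reqctx {
	u8 iv[IV + 32];		/* 32 stands in for MAX_SCRATCH_PAD_SIZE */
	unsigned int b0_len;	/* 0 when there is no B0 block (non-CCM) */
	dma_addr_t iv_dma;
	dma_addr_t b0_dma;
};

static int demo_map_iv_and_b0(struct device *dev, struct demo_reqctx *r)
{
	/* One mapping covers the IV and, when present, the trailing B0. */
	r->iv_dma = dma_map_single(dev, r->iv, IV + r->b0_len,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, r->iv_dma))
		return -ENOMEM;

	/* B0's bus address is just an offset into the same mapping. */
	r->b0_dma = r->b0_len ? r->iv_dma + IV : 0;
	return 0;
}

This removes the separate map/unmap of the B0 scratch buffer and the extra error path it required; the unmap side likewise becomes a single dma_unmap_single() of IV + b0_len.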
@@ -203,13 +203,8 @@ static inline void chcr_handle_aead_resp(struct aead_request *req,
 					 int err)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
 
-	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
-	if (reqctx->b0_dma)
-		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
-				 reqctx->b0_len, DMA_BIDIRECTIONAL);
+	chcr_aead_common_exit(req);
 	if (reqctx->verify == VERIFY_SW) {
 		chcr_verify_tag(req, input, &err);
 		reqctx->verify = VERIFY_HW;
@@ -2178,22 +2173,35 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 	}
 }
 
-static int chcr_aead_common_init(struct aead_request *req,
-				 unsigned short op_type)
+inline void chcr_aead_common_exit(struct aead_request *req)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+}
+
+static int chcr_aead_common_init(struct aead_request *req)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
-	int error = -EINVAL;
 	unsigned int authsize = crypto_aead_authsize(tfm);
+	int error = -EINVAL;
 
 	/* validate key size */
 	if (aeadctx->enckey_len == 0)
 		goto err;
-	if (op_type && req->cryptlen < authsize)
+	if (reqctx->op && req->cryptlen < authsize)
 		goto err;
+	if (reqctx->b0_len)
+		reqctx->scratch_pad = reqctx->iv + IV;
+	else
+		reqctx->scratch_pad = NULL;
+
 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				  op_type);
+				  reqctx->op);
 	if (error) {
 		error = -ENOMEM;
 		goto err;
@@ -2230,7 +2238,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
 	aead_request_set_callback(subreq, req->base.flags,
 				  req->base.complete, req->base.data);
-	 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
 				 req->iv);
 	aead_request_set_ad(subreq, req->assoclen);
 	return op_type ? crypto_aead_decrypt(subreq) :
@@ -2239,8 +2247,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
 
 static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					 unsigned short qid,
-					 int size,
-					 unsigned short op_type)
+					 int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2264,18 +2271,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	if (req->cryptlen == 0)
 		return NULL;
 
-	reqctx->b0_dma = 0;
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
+	if (error)
+		return ERR_PTR(error);
+
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
 		null = 1;
 		assoclen = 0;
+		reqctx->aad_nents = 0;
 	}
-	error = chcr_aead_common_init(req, op_type);
-	if (error)
-		return ERR_PTR(error);
 
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
-		(op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
 		req->assoclen);
 	dnents += MIN_AUTH_SG; // For IV
@@ -2292,11 +2301,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	transhdr_len = roundup(transhdr_len, 16);
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
-				    transhdr_len, op_type)) {
+				    transhdr_len, reqctx->op)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
@@ -2306,7 +2314,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
-	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
 
 	/*
 	 * Input order is AAD,IV and Payload. where IV should be included as
@@ -2330,8 +2338,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
 	else
 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
-	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
-					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
+					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
 					temp,
 					actx->auth_mode, aeadctx->hmac_ctrl,
 					IV >> 1);
@@ -2339,7 +2347,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					  0, 0, dst_size);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
-	if (op_type == CHCR_ENCRYPT_OP ||
+	if (reqctx->op == CHCR_ENCRYPT_OP ||
 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
@@ -2362,20 +2370,18 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	}
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 	atomic_inc(&adap->chcr_stats.cipher_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
 		   transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 
 	return skb;
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-			    op_type);
+	chcr_aead_common_exit(req);
 	return ERR_PTR(error);
 }
@@ -2394,11 +2400,14 @@ int chcr_aead_dma_map(struct device *dev,
 			      -authsize : authsize);
 	if (!req->cryptlen || !dst_size)
 		return 0;
-	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
 					DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, reqctx->iv_dma))
 		return -ENOMEM;
+	if (reqctx->b0_len)
+		reqctx->b0_dma = reqctx->iv_dma + IV;
+	else
+		reqctx->b0_dma = 0;
 	if (req->src == req->dst) {
 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
 				   DMA_BIDIRECTIONAL);
@@ -2438,7 +2447,7 @@ void chcr_aead_dma_unmap(struct device *dev,
 	if (!req->cryptlen || !dst_size)
 		return;
 
-	dma_unmap_single(dev, reqctx->iv_dma, IV,
+	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
 			 DMA_BIDIRECTIONAL);
 	if (req->src == req->dst) {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -2453,8 +2462,7 @@ void chcr_aead_dma_unmap(struct device *dev,
 
 void chcr_add_aead_src_ent(struct aead_request *req,
 			   struct ulptx_sgl *ulptx,
-			   unsigned int assoclen,
-			   unsigned short op_type)
+			   unsigned int assoclen)
 {
 	struct ulptx_walk ulp_walk;
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2462,7 +2470,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 
 	if (reqctx->imm) {
 		u8 *buf = (u8 *)ulptx;
-		if (reqctx->b0_dma) {
+		if (reqctx->b0_len) {
 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
 			buf += reqctx->b0_len;
 		}
@@ -2475,7 +2483,7 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 			   buf, req->cryptlen, req->assoclen);
 	} else {
 		ulptx_walk_init(&ulp_walk, ulptx);
-		if (reqctx->b0_dma)
+		if (reqctx->b0_len)
 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
 					    &reqctx->b0_dma);
 		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
@@ -2489,7 +2497,6 @@ void chcr_add_aead_src_ent(struct aead_request *req,
 void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
 			   unsigned int assoclen,
-			   unsigned short op_type,
 			   unsigned short qid)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2499,11 +2506,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	u32 temp;
 
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
-	if (reqctx->b0_dma)
+	if (reqctx->b0_len)
 		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
 	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
-	temp = req->cryptlen + (op_type ? -authsize : authsize);
+	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
 	dsgl_walk_end(&dsgl_walk, qid);
 }
@@ -2710,7 +2717,8 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
 static int ccm_format_packet(struct aead_request *req,
 			     struct chcr_aead_ctx *aeadctx,
 			     unsigned int sub_type,
-			     unsigned short op_type)
+			     unsigned short op_type,
+			     unsigned int assoclen)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	int rc = 0;
@@ -2720,13 +2728,13 @@ static int ccm_format_packet(struct aead_request *req,
 		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
 		memcpy(reqctx->iv + 4, req->iv, 8);
 		memset(reqctx->iv + 12, 0, 4);
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(req->assoclen - 8);
 	} else {
 		memcpy(reqctx->iv, req->iv, 16);
-		*((unsigned short *)(reqctx->scratch_pad + 16)) =
-				htons(req->assoclen);
 	}
+	if (assoclen)
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+				htons(assoclen);
+
 	generate_b0(req, aeadctx, op_type);
 	/* zero the ctr value */
 	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
@@ -2808,8 +2816,7 @@ static int aead_ccm_validate_input(unsigned short op_type,
 
 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					  unsigned short qid,
-					  int size,
-					  unsigned short op_type)
+					  int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2827,22 +2834,20 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 			GFP_ATOMIC;
 	struct adapter *adap = padap(a_ctx(tfm)->dev);
 
-	reqctx->b0_dma = 0;
 	sub_type = get_aead_subtype(tfm);
 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
 		assoclen -= 8;
-	error = chcr_aead_common_init(req, op_type);
+	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
+	error = chcr_aead_common_init(req);
 	if (error)
 		return ERR_PTR(error);
-
-	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
-	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
+	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
 	if (error)
 		goto err;
+
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen
-				+ (op_type ? -authsize : authsize),
+				+ (reqctx->op ? -authsize : authsize),
 				CHCR_DST_SG_SIZE, req->assoclen);
 	dnents += MIN_CCM_SG; // For IV and B0
 	dst_size = get_space_for_phys_dsgl(dnents);
@@ -2858,11 +2863,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	transhdr_len = roundup(transhdr_len, 16);
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
-				    reqctx->b0_len, transhdr_len, op_type)) {
+				    reqctx->b0_len, transhdr_len, reqctx->op)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
@@ -2873,7 +2877,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
 
-	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
 
 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2882,21 +2886,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
-	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
+	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
 	if (error)
 		goto dstmap_fail;
-
-	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
-					&reqctx->scratch_pad, reqctx->b0_len,
-					DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
-			      reqctx->b0_dma)) {
-		error = -ENOMEM;
-		goto dstmap_fail;
-	}
-
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 
 	atomic_inc(&adap->chcr_stats.aead_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
@@ -2905,20 +2899,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
 		    transhdr_len, temp, 0);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 
 	return skb;
 dstmap_fail:
 	kfree_skb(skb);
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+	chcr_aead_common_exit(req);
 	return ERR_PTR(error);
 }
 
 static struct sk_buff *create_gcm_wr(struct aead_request *req,
 				     unsigned short qid,
-				     int size,
-				     unsigned short op_type)
+				     int size)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
@@ -2938,13 +2930,13 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
 		assoclen = req->assoclen - 8;
-	reqctx->b0_dma = 0;
-	error = chcr_aead_common_init(req, op_type);
+	reqctx->b0_len = 0;
+	error = chcr_aead_common_init(req);
 	if (error)
 		return ERR_PTR(error);
 	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
 	dnents += sg_nents_xlen(req->dst, req->cryptlen +
-				(op_type ? -authsize : authsize),
+				(reqctx->op ? -authsize : authsize),
 				CHCR_DST_SG_SIZE, req->assoclen);
 	dnents += MIN_GCM_SG; // For IV
 	dst_size = get_space_for_phys_dsgl(dnents);
@@ -2958,11 +2950,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	transhdr_len += temp;
 	transhdr_len = roundup(transhdr_len, 16);
 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
-				    transhdr_len, op_type)) {
+				    transhdr_len, reqctx->op)) {
 		atomic_inc(&adap->chcr_stats.fallback);
-		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
-				    op_type);
-		return ERR_PTR(chcr_aead_fallback(req, op_type));
+		chcr_aead_common_exit(req);
+		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
 	}
 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
 	if (!skb) {
@@ -2973,7 +2965,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	chcr_req = __skb_put_zero(skb, transhdr_len);
 
 	//Offset of tag from end
-	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
 					a_ctx(tfm)->dev->rx_channel_id, 2,
 					(assoclen + 1));
@@ -2986,7 +2978,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
 						temp, temp);
 	chcr_req->sec_cpl.seqno_numivs =
-			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
 					CHCR_ENCRYPT_OP) ? 1 : 0,
 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
 					CHCR_SCMD_AUTH_MODE_GHASH,
@@ -3012,19 +3004,18 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
 
-	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
-	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
+	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+	chcr_add_aead_src_ent(req, ulptx, assoclen);
 	atomic_inc(&adap->chcr_stats.aead_rqst);
 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
 		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
 		    transhdr_len, temp, reqctx->verify);
 	reqctx->skb = skb;
-	reqctx->op = op_type;
 	return skb;
 
 err:
-	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
+	chcr_aead_common_exit(req);
 	return ERR_PTR(error);
 }
@@ -3558,7 +3549,6 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
 }
 
 static int chcr_aead_op(struct aead_request *req,
-			unsigned short op_type,
 			int size,
 			create_wr_t create_wr_fn)
 {
@@ -3580,8 +3570,7 @@ static int chcr_aead_op(struct aead_request *req,
 	}
 
 	/* Form a WR from req */
-	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
-			   op_type);
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
 
 	if (IS_ERR(skb) || !skb)
 		return PTR_ERR(skb);
@@ -3598,21 +3587,19 @@ static int chcr_aead_encrypt(struct aead_request *req)
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 
 	reqctx->verify = VERIFY_HW;
+	reqctx->op = CHCR_ENCRYPT_OP;
 
 	switch (get_aead_subtype(tfm)) {
 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_authenc_wr);
+		return chcr_aead_op(req, 0, create_authenc_wr);
 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_aead_ccm_wr);
+		return chcr_aead_op(req, 0, create_aead_ccm_wr);
 	default:
-		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
-				    create_gcm_wr);
+		return chcr_aead_op(req, 0, create_gcm_wr);
 	}
 }
@@ -3630,21 +3617,18 @@ static int chcr_aead_decrypt(struct aead_request *req)
 		size = 0;
 		reqctx->verify = VERIFY_HW;
 	}
+	reqctx->op = CHCR_DECRYPT_OP;
 	switch (get_aead_subtype(tfm)) {
 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_authenc_wr);
+		return chcr_aead_op(req, size, create_authenc_wr);
 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_aead_ccm_wr);
+		return chcr_aead_op(req, size, create_aead_ccm_wr);
 	default:
-		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
-				    create_gcm_wr);
+		return chcr_aead_op(req, size, create_gcm_wr);
 	}
 }
...
@@ -190,8 +190,8 @@ struct chcr_aead_reqctx {
 	short int dst_nents;
 	u16 imm;
 	u16 verify;
-	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
-	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+	u8 *scratch_pad;
 };
 
 struct ulptx_walk {
@@ -311,8 +311,7 @@ struct chcr_alg_template {
 
 typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
 					unsigned short qid,
-					int size,
-					unsigned short op_type);
+					int size);
 
 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
 int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
@@ -321,10 +320,10 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
 			 unsigned short op_type);
 void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
-			   unsigned int assoclen, unsigned short op_type,
+			   unsigned int assoclen,
 			   unsigned short qid);
 void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
-			   unsigned int assoclen, unsigned short op_type);
+			   unsigned int assoclen);
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam);
@@ -339,4 +338,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
 			   struct hash_wr_param *param);
 int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
 void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
+void chcr_aead_common_exit(struct aead_request *req);
 #endif /* __CHCR_CRYPTO_H__ */