Commit 4b394a23 authored by Gary R Hook, committed by Herbert Xu

crypto: ccp - Let a v5 CCP provide the same function as v3

Enable equivalent function on a v5 CCP. Add support for a
version 5 CCP which enables AES/XTS/SHA services. Also,
rework the data structures to virtualize the version-specific
functionality.
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent: bb4e89b3
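
The core of the change is the version-dispatch table: each device generation supplies a struct ccp_vdata holding an optional setup() hook and a ccp_actions ops table, and the generic probe path calls through it (see the ccp-dev.h and ccp-pci.c hunks below). A minimal user-space sketch of that pattern follows; the v5 hook bodies, the version encoding, and the harness are illustrative stand-ins, not driver code.

/* Illustrative harness only: models the commit's dispatch pattern in
 * user space. The structure shapes mirror the driver's.
 */
#include <stdio.h>

struct ccp_device;

struct ccp_actions {
	int (*init)(struct ccp_device *dev);
};

struct ccp_vdata {
	const unsigned int version;
	void (*setup)(struct ccp_device *dev);	/* optional; v3 sets NULL */
	const struct ccp_actions *perform;
	const unsigned int bar;
	const unsigned int offset;
};

struct ccp_device {
	const struct ccp_vdata *vdata;
};

static void ccp5_setup(struct ccp_device *dev) { (void)dev; puts("v5 setup"); }
static int ccp5_init(struct ccp_device *dev) { (void)dev; puts("v5 init"); return 0; }

static const struct ccp_actions ccp5_actions = { .init = ccp5_init };
static const struct ccp_vdata ccpv5 = {
	.version = 0x50,	/* stand-in for CCP_VERSION(5, 0) */
	.setup = ccp5_setup,
	.perform = &ccp5_actions,
};

int main(void)
{
	struct ccp_device dev = { .vdata = &ccpv5 };

	/* Same sequence ccp_pci_probe() uses after this commit:
	 * optional version-specific setup, then the version's init.
	 */
	if (dev.vdata->setup)
		dev.vdata->setup(&dev);
	return dev.vdata->perform->init(&dev);
}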
drivers/crypto/ccp/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o \
 	    ccp-ops.o \
 	    ccp-dev-v3.o \
+	    ccp-dev-v5.o \
 	    ccp-platform.o \
 	    ccp-dmaengine.o
 ccp-$(CONFIG_PCI) += ccp-pci.o
...
drivers/crypto/ccp/ccp-crypto-sha.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -134,7 +135,22 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 	rctx->cmd.engine = CCP_ENGINE_SHA;
 	rctx->cmd.u.sha.type = rctx->type;
 	rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
-	rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
+
+	switch (rctx->type) {
+	case CCP_SHA_TYPE_1:
+		rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		/* Should never get here */
+		break;
+	}
+
 	rctx->cmd.u.sha.src = sg;
 	rctx->cmd.u.sha.src_len = rctx->hash_cnt;
 	rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
...
drivers/crypto/ccp/ccp-dev-v3.c
@@ -405,6 +405,7 @@ static int ccp_init(struct ccp_device *ccp)
 	init_waitqueue_head(&ccp->sb_queue);
 	init_waitqueue_head(&ccp->suspend_queue);
 
+	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
 		struct task_struct *kthread;
@@ -424,6 +425,13 @@ static int ccp_init(struct ccp_device *ccp)
 		wake_up_process(kthread);
 	}
 
+	dev_dbg(dev, "Enabling interrupts...\n");
+	/* Enable interrupts */
+	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+	dev_dbg(dev, "Registering device...\n");
+	ccp_add_device(ccp);
+
 	/* Register the RNG */
 	ccp->hwrng.name = ccp->rngname;
 	ccp->hwrng.read = ccp_trng_read;
@@ -438,11 +446,6 @@ static int ccp_init(struct ccp_device *ccp)
 	if (ret)
 		goto e_hwrng;
 
-	ccp_add_device(ccp);
-
-	/* Enable interrupts */
-	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
 	return 0;
 
 e_hwrng:
@@ -468,7 +471,13 @@ static void ccp_destroy(struct ccp_device *ccp)
 	struct ccp_cmd *cmd;
 	unsigned int qim, i;
 
-	/* Remove this device from the list of available units first */
+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
+	/* Unregister the RNG */
+	hwrng_unregister(&ccp->hwrng);
+
+	/* Remove this device from the list of available units */
 	ccp_del_device(ccp);
 
 	/* Build queue interrupt mask (two interrupt masks per queue) */
@@ -488,12 +497,6 @@ static void ccp_destroy(struct ccp_device *ccp)
 	}
 	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
 
-	/* Unregister the DMA engine */
-	ccp_dmaengine_unregister(ccp);
-
-	/* Unregister the RNG */
-	hwrng_unregister(&ccp->hwrng);
-
 	/* Stop the queue kthreads */
 	for (i = 0; i < ccp->cmd_q_count; i++)
 		if (ccp->cmd_q[i].kthread)
@@ -570,6 +573,7 @@ static const struct ccp_actions ccp3_actions = {
 
 struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
 	.perform = &ccp3_actions,
 	.bar = 2,
 	.offset = 0x20000,
...
drivers/crypto/ccp/ccp-dev-v5.c
(new file; its diff is collapsed in this view)
drivers/crypto/ccp/ccp-dev.h
@@ -61,7 +61,62 @@
 #define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
 #define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)
 
-/****** REQ0 Related Values ******/
+/* ------------------------ CCP Version 5 Specifics ------------------------ */
+#define CMD5_QUEUE_MASK_OFFSET		0x00
+#define CMD5_REQID_CONFIG_OFFSET	0x08
+#define LSB_PUBLIC_MASK_LO_OFFSET	0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET	0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET	0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET	0x24
+
+#define CMD5_Q_CONTROL_BASE		0x0000
+#define CMD5_Q_TAIL_LO_BASE		0x0004
+#define CMD5_Q_HEAD_LO_BASE		0x0008
+#define CMD5_Q_INT_ENABLE_BASE		0x000C
+#define CMD5_Q_INTERRUPT_STATUS_BASE	0x0010
+
+#define CMD5_Q_STATUS_BASE		0x0100
+#define CMD5_Q_INT_STATUS_BASE		0x0104
+#define CMD5_Q_DMA_STATUS_BASE		0x0108
+#define CMD5_Q_DMA_READ_STATUS_BASE	0x010C
+#define CMD5_Q_DMA_WRITE_STATUS_BASE	0x0110
+#define CMD5_Q_ABORT_BASE		0x0114
+#define CMD5_Q_AX_CACHE_BASE		0x0118
+
+/* Address offset between two virtual queue registers */
+#define CMD5_Q_STATUS_INCR		0x1000
+
+/* Bit masks */
+#define CMD5_Q_RUN			0x1
+#define CMD5_Q_HALT			0x2
+#define CMD5_Q_MEM_LOCATION		0x4
+#define CMD5_Q_SIZE			0x1F
+#define CMD5_Q_SHIFT			3
+#define COMMANDS_PER_QUEUE		16
+#define QUEUE_SIZE_VAL			((ffs(COMMANDS_PER_QUEUE) - 2) & \
+					  CMD5_Q_SIZE)
+#define Q_PTR_MASK			(2 << (QUEUE_SIZE_VAL + 5) - 1)
+#define Q_DESC_SIZE			sizeof(struct ccp5_desc)
+#define Q_SIZE(n)			(COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION			0x1
+#define INT_ERROR			0x2
+#define INT_QUEUE_STOPPED		0x4
+#define ALL_INTERRUPTS			(INT_COMPLETION| \
+					 INT_ERROR| \
+					 INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH		5
+#define MAX_LSB_CNT			8
+
+#define LSB_SIZE			16
+#define LSB_ITEM_SIZE			32
+#define PLSB_MAP_SIZE			(LSB_SIZE)
+#define SLSB_MAP_SIZE			(MAX_LSB_CNT * LSB_SIZE)
+
+#define LSB_ENTRY_NUMBER(LSB_ADDR)	(LSB_ADDR / LSB_ITEM_SIZE)
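
To make the queue-sizing arithmetic above concrete, here is a small user-space check (assuming the usual ffs() semantics, where ffs(16) == 5, and the 32-byte struct ccp5_desc defined later in this header); it is illustrative, not driver code:

#include <assert.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* QUEUE_SIZE_VAL = (ffs(16) - 2) & 0x1F = (5 - 2) & 0x1F = 3 */
	int queue_size_val = (ffs(16) - 2) & 0x1F;
	assert(queue_size_val == 3);

	/* Q_SIZE(Q_DESC_SIZE): 16 descriptors x 32 bytes = 512 bytes */
	assert(16 * 32 == 512);

	/* C precedence note: `+`/`-` bind tighter than `<<`, so Q_PTR_MASK
	 * expands to 2 << (3 + 5 - 1) = 0x100 rather than
	 * (2 << 8) - 1 = 0x1ff.
	 */
	assert((2 << (queue_size_val + 5) - 1) == 0x100);

	return 0;
}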
+
+/* ------------------------ CCP Version 3 Specifics ------------------------ */
 #define REQ0_WAIT_FOR_WRITE		0x00000004
 #define REQ0_INT_ON_COMPLETE		0x00000002
 #define REQ0_STOP_ON_COMPLETE		0x00000001
@@ -115,6 +170,8 @@
 
 #define CCP_JOBID_MASK			0x0000003f
 
+/* ------------------------ General CCP Defines ------------------------ */
+
 #define CCP_DMAPOOL_MAX_SIZE		64
 #define CCP_DMAPOOL_ALIGN		BIT(5)
 
@@ -149,6 +206,7 @@
 struct ccp_op;
 struct ccp_device;
 struct ccp_cmd;
+struct ccp_fns;
 
 struct ccp_dma_cmd {
 	struct list_head entry;
@@ -192,10 +250,30 @@ struct ccp_cmd_queue {
 	/* Queue dma pool */
 	struct dma_pool *dma_pool;
 
+	/* Queue base address (not necessarily aligned) */
+	struct ccp5_desc *qbase;
+
+	/* Aligned queue start address (per requirement) */
+	struct mutex q_mutex ____cacheline_aligned;
+	unsigned int qidx;
+
+	/* Version 5 has different requirements for queue memory */
+	unsigned int qsize;
+	dma_addr_t qbase_dma;
+	dma_addr_t qdma_tail;
+
 	/* Per-queue reserved storage block(s) */
 	u32 sb_key;
 	u32 sb_ctx;
 
+	/* Bitmap of LSBs that can be accessed by this queue */
+	DECLARE_BITMAP(lsbmask, MAX_LSB_CNT);
+
+	/* Private LSB that is assigned to this queue, or -1 if none.
+	 * Bitmap for my private LSB, unused otherwise
+	 */
+	unsigned int lsb;
+	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
+
 	/* Queue processing thread */
 	struct task_struct *kthread;
 	unsigned int active;
@@ -209,8 +287,17 @@ struct ccp_cmd_queue {
 	u32 int_err;
 
 	/* Register addresses for queue */
+	void __iomem *reg_control;
+	void __iomem *reg_tail_lo;
+	void __iomem *reg_head_lo;
+	void __iomem *reg_int_enable;
+	void __iomem *reg_interrupt_status;
 	void __iomem *reg_status;
 	void __iomem *reg_int_status;
+	void __iomem *reg_dma_status;
+	void __iomem *reg_dma_read_status;
+	void __iomem *reg_dma_write_status;
+	u32 qcontrol; /* Cached control register */
 
 	/* Status values from job */
 	u32 int_status;
@@ -306,6 +393,9 @@ struct ccp_device {
 	unsigned int sb_count;
 	u32 sb_start;
 
+	/* Bitmap of shared LSBs, if any */
+	DECLARE_BITMAP(lsbmap, SLSB_MAP_SIZE);
+
 	/* Suspend support */
 	unsigned int suspending;
 	wait_queue_head_t suspend_queue;
@@ -320,6 +410,7 @@ enum ccp_memtype {
 	CCP_MEMTYPE_LOCAL,
 	CCP_MEMTYPE__LAST,
 };
+#define CCP_MEMTYPE_LSB	CCP_MEMTYPE_KSB
 
 struct ccp_dma_info {
 	dma_addr_t address;
@@ -407,6 +498,7 @@ struct ccp_op {
 
 	struct ccp_mem src;
 	struct ccp_mem dst;
+	struct ccp_mem exp;
 
 	union {
 		struct ccp_aes_op aes;
@@ -416,6 +508,7 @@ struct ccp_op {
 		struct ccp_passthru_op passthru;
 		struct ccp_ecc_op ecc;
 	} u;
+	struct ccp_mem key;
 };
 
 static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -428,6 +521,70 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
 	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
 }
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	__le32 soc:1;
+	__le32 ioc:1;
+	__le32 rsvd1:1;
+	__le32 init:1;
+	__le32 eom:1;		/* AES/SHA only */
+	__le32 function:15;
+	__le32 engine:4;
+	__le32 prot:1;
+	__le32 rsvd2:7;
+};
+
+struct dword3 {
+	__le32 src_hi:16;
+	__le32 src_mem:2;
+	__le32 lsb_cxt_id:8;
+	__le32 rsvd1:5;
+	__le32 fixed:1;
+};
+
+union dword4 {
+	__le32 dst_lo;		/* NON-SHA */
+	__le32 sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		__le32 dst_hi:16;
+		__le32 dst_mem:2;
+		__le32 rsvd1:13;
+		__le32 fixed:1;
+	} fields;
+	__le32 sha_len_hi;
+};
+
+struct dword7 {
+	__le32 key_hi:16;
+	__le32 key_mem:2;
+	__le32 rsvd1:14;
+};
+
+struct ccp5_desc {
+	struct dword0 dw0;
+	__le32 length;
+	__le32 src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	__le32 key_lo;
+	struct dword7 dw7;
+};
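
As a rough sketch of how one of these 32-byte descriptors might be populated per the word layout documented above, assuming the struct definitions above are in scope with a plain uint32_t standing in for the kernel's __le32 (valid on a little-endian host); the engine code and memory-type value here are hypothetical stand-ins, not values taken from the hardware spec:

#include <stdint.h>
#include <string.h>

typedef uint32_t __le32;	/* user-space stand-in for the kernel type */

/* struct dword0 ... struct ccp5_desc exactly as declared in the hunk above */

static void fill_desc_sketch(struct ccp5_desc *d, uint64_t src,
			     uint64_t dst, uint32_t len)
{
	memset(d, 0, sizeof(*d));		/* reserved fields stay zero */
	d->dw0.ioc = 1;				/* word 0: interrupt on completion */
	d->dw0.engine = 0x5;			/* hypothetical engine code */
	d->length = len;			/* word 1: source data length */
	d->src_lo = (uint32_t)src;		/* word 2: low 32 bits of source */
	d->dw3.src_hi = (uint32_t)(src >> 32);	/* word 3: source bits 47:32 */
	d->dw3.src_mem = 0x2;			/* hypothetical memory type */
	d->dw4.dst_lo = (uint32_t)dst;		/* word 4 */
	d->dw5.fields.dst_hi = (uint32_t)(dst >> 32);	/* word 5 */
	d->dw5.fields.dst_mem = 0x2;
}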
 
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
@@ -466,13 +623,14 @@ struct ccp_actions {
 
 /* Structure to hold CCP version-specific values */
 struct ccp_vdata {
-	unsigned int version;
-	int (*init)(struct ccp_device *);
+	const unsigned int version;
+	void (*setup)(struct ccp_device *);
 	const struct ccp_actions *perform;
 	const unsigned int bar;
 	const unsigned int offset;
 };
 
 extern struct ccp_vdata ccpv3;
+extern struct ccp_vdata ccpv5;
 
 #endif
drivers/crypto/ccp/ccp-ops.c
@@ -21,26 +21,29 @@
 #include "ccp-dev.h"
 
 /* SHA initial context values */
-static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
 	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
-	cpu_to_be32(SHA1_H4), 0, 0, 0,
+	cpu_to_be32(SHA1_H4),
 };
 
-static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
 	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
 	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
 	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
 };
 
-static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
+static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
 	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
 	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
+#define CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
+					ccp_gen_jobid(ccp) : 0)
+
 static u32 ccp_gen_jobid(struct ccp_device *ccp)
 {
 	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
@@ -487,7 +490,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 	op.sb_key = cmd_q->sb_key;
 	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
@@ -640,7 +643,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 	op.sb_key = cmd_q->sb_key;
 	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
@@ -679,7 +682,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_key;
 
 	if (aes->mode != CCP_AES_MODE_ECB) {
-		/* Load the AES context - conver to LE */
+		/* Load the AES context - convert to LE */
 		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
 		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
 		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -817,7 +820,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	ret = -EIO;
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 	op.sb_key = cmd_q->sb_key;
 	op.sb_ctx = cmd_q->sb_ctx;
 	op.init = 1;
@@ -936,98 +939,154 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct ccp_dm_workarea ctx;
 	struct ccp_data src;
 	struct ccp_op op;
+	unsigned int ioffset, ooffset;
+	unsigned int digest_size;
+	int sb_count;
+	const void *init;
+	u64 block_size;
+	int ctx_size;
 	int ret;
 
-	if (sha->ctx_len != CCP_SHA_CTXSIZE)
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		if (sha->ctx_len < SHA1_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA1_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_224:
+		if (sha->ctx_len < SHA224_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA224_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_256:
+		if (sha->ctx_len < SHA256_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA256_BLOCK_SIZE;
+		break;
+	default:
 		return -EINVAL;
+	}
 
 	if (!sha->ctx)
 		return -EINVAL;
 
-	if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
+	if (!sha->final && (sha->src_len & (block_size - 1)))
 		return -EINVAL;
 
+	/* The version 3 device can't handle zero-length input */
+	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
 		if (!sha->src_len) {
+			unsigned int digest_len;
 			const u8 *sha_zero;
 
 			/* Not final, just return */
 			if (!sha->final)
 				return 0;
 
-		/* CCP can't do a zero length sha operation so the caller
-		 * must buffer the data.
-		 */
+			/* CCP can't do a zero length sha operation so the
+			 * caller must buffer the data.
+			 */
 			if (sha->msg_bits)
 				return -EINVAL;
 
-		/* The CCP cannot perform zero-length sha operations so the
-		 * caller is required to buffer data for the final operation.
-		 * However, a sha operation for a message with a total length
-		 * of zero is valid so known values are required to supply
-		 * the result.
-		 */
+			/* The CCP cannot perform zero-length sha operations
+			 * so the caller is required to buffer data for the
+			 * final operation. However, a sha operation for a
+			 * message with a total length of zero is valid so
+			 * known values are required to supply the result.
+			 */
 			switch (sha->type) {
 			case CCP_SHA_TYPE_1:
 				sha_zero = sha1_zero_message_hash;
+				digest_len = SHA1_DIGEST_SIZE;
 				break;
 			case CCP_SHA_TYPE_224:
 				sha_zero = sha224_zero_message_hash;
+				digest_len = SHA224_DIGEST_SIZE;
 				break;
 			case CCP_SHA_TYPE_256:
 				sha_zero = sha256_zero_message_hash;
+				digest_len = SHA256_DIGEST_SIZE;
 				break;
 			default:
 				return -EINVAL;
 			}
 
 			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
-						 sha->ctx_len, 1);
+						 digest_len, 1);
 
 			return 0;
 		}
+	}
 
-	if (!sha->src)
-		return -EINVAL;
+	/* Set variables used throughout */
+	switch (sha->type) {
+	case CCP_SHA_TYPE_1:
+		digest_size = SHA1_DIGEST_SIZE;
+		init = (void *) ccp_sha1_init;
+		ctx_size = SHA1_DIGEST_SIZE;
+		sb_count = 1;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		else
+			ooffset = ioffset = 0;
+		break;
+	case CCP_SHA_TYPE_224:
+		digest_size = SHA224_DIGEST_SIZE;
+		init = (void *) ccp_sha224_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ioffset = 0;
+		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
+			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		else
+			ooffset = 0;
+		break;
+	case CCP_SHA_TYPE_256:
+		digest_size = SHA256_DIGEST_SIZE;
+		init = (void *) ccp_sha256_init;
+		ctx_size = SHA256_DIGEST_SIZE;
+		sb_count = 1;
+		ooffset = ioffset = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto e_data;
+	}
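	/* Note: assuming CCP_SB_BYTES is 32, the offsets chosen above mean
	 * a v5 device keeps the SHA-1 state in the high 20 bytes of its
	 * 32-byte LSB slot (ioffset = ooffset = 32 - 20 = 12) and returns
	 * the SHA-224 digest at offset 32 - 28 = 4, while a v3 device uses
	 * offset 0 in both directions.
	 */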
 
-	BUILD_BUG_ON(CCP_SHA_SB_COUNT != 1);
+	/* For zero-length plaintext the src pointer is ignored;
+	 * otherwise both parts must be valid
+	 */
+	if (sha->src_len && !sha->src)
+		return -EINVAL;
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.sb_ctx = cmd_q->sb_ctx;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
 	op.u.sha.type = sha->type;
 	op.u.sha.msg_bits = sha->msg_bits;
 
-	/* The SHA context fits in a single (32-byte) SB entry and
-	 * must be in little endian format. Use the 256-bit byte swap
-	 * passthru option to convert from big endian to little endian.
-	 */
-	ret = ccp_init_dm_workarea(&ctx, cmd_q,
-				   CCP_SHA_SB_COUNT * CCP_SB_BYTES,
+	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		return ret;
 
 	if (sha->first) {
-		const __be32 *init;
-
 		switch (sha->type) {
 		case CCP_SHA_TYPE_1:
-			init = ccp_sha1_init;
-			break;
 		case CCP_SHA_TYPE_224:
-			init = ccp_sha224_init;
-			break;
 		case CCP_SHA_TYPE_256:
-			init = ccp_sha256_init;
+			memcpy(ctx.address + ioffset, init, ctx_size);
 			break;
 		default:
 			ret = -EINVAL;
 			goto e_ctx;
 		}
-
-		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
 	} else {
-		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
+		/* Restore the context */
+		ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+				sb_count * CCP_SB_BYTES);
 	}
 
 	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
@@ -1037,14 +1096,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_ctx;
 	}
 
-	/* Send data to the CCP SHA engine */
+	if (sha->src) {
+		/* Send data to the CCP SHA engine; block_size is set above */
 		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
-			    CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
+				    block_size, DMA_TO_DEVICE);
 		if (ret)
 			goto e_ctx;
 
 		while (src.sg_wa.bytes_left) {
-		ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
+			ccp_prepare_data(&src, NULL, &op, block_size, false);
 			if (sha->final && !src.sg_wa.bytes_left)
 				op.eom = 1;
@@ -1056,6 +1116,14 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 			ccp_process_data(&src, NULL, &op);
 		}
+	} else {
+		op.eom = 1;
+		ret = cmd_q->ccp->vdata->perform->sha(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_data;
+		}
+	}
 
 	/* Retrieve the SHA context - convert from LE to BE using
 	 * 32-byte (256-bit) byteswapping to BE
@@ -1067,32 +1135,31 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_data;
 	}
 
-	ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
-
-	if (sha->final && sha->opad) {
-		/* HMAC operation, recursively perform final SHA */
-		struct ccp_cmd hmac_cmd;
-		struct scatterlist sg;
-		u64 block_size, digest_size;
-		u8 *hmac_buf;
-
+	if (sha->final) {
+		/* Finishing up, so get the digest */
 		switch (sha->type) {
 		case CCP_SHA_TYPE_1:
-			block_size = SHA1_BLOCK_SIZE;
-			digest_size = SHA1_DIGEST_SIZE;
-			break;
 		case CCP_SHA_TYPE_224:
-			block_size = SHA224_BLOCK_SIZE;
-			digest_size = SHA224_DIGEST_SIZE;
-			break;
 		case CCP_SHA_TYPE_256:
-			block_size = SHA256_BLOCK_SIZE;
-			digest_size = SHA256_DIGEST_SIZE;
+			ccp_get_dm_area(&ctx, ooffset,
+					sha->ctx, 0,
+					digest_size);
 			break;
 		default:
 			ret = -EINVAL;
-			goto e_data;
+			goto e_ctx;
 		}
+	} else {
+		/* Stash the context */
+		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
+				sb_count * CCP_SB_BYTES);
+	}
+
+	if (sha->final && sha->opad) {
+		/* HMAC operation, recursively perform final SHA */
+		struct ccp_cmd hmac_cmd;
+		struct scatterlist sg;
+		u8 *hmac_buf;
 
 		if (sha->opad_len != block_size) {
 			ret = -EINVAL;
@@ -1107,7 +1174,18 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		sg_init_one(&sg, hmac_buf, block_size + digest_size);
 		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
-		memcpy(hmac_buf + block_size, ctx.address, digest_size);
+		switch (sha->type) {
+		case CCP_SHA_TYPE_1:
+		case CCP_SHA_TYPE_224:
+		case CCP_SHA_TYPE_256:
+			memcpy(hmac_buf + block_size,
+			       ctx.address + ooffset,
+			       digest_size);
+			break;
+		default:
+			ret = -EINVAL;
+			goto e_ctx;
+		}
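		/* Note: hmac_buf now holds (K XOR opad) || inner-digest, so
		 * the recursive SHA below yields the standard HMAC result
		 * H((K XOR opad) || H((K XOR ipad) || msg)); the caller
		 * supplies the opad block.
		 */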
 
 		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
 		hmac_cmd.engine = CCP_ENGINE_SHA;
@@ -1130,6 +1208,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	}
 
 e_data:
+	if (sha->src)
 		ccp_free_data(&src, cmd_q);
 
 e_ctx:
@@ -1261,7 +1340,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 	struct ccp_op op;
 	bool in_place = false;
 	unsigned int i;
-	int ret;
+	int ret = 0;
 
 	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
 		return -EINVAL;
@@ -1280,7 +1359,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
 		/* Load the mask */
@@ -1469,7 +1548,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	/* Concatenate the modulus and the operands. Both the modulus and
 	 * the operands must be in little endian format. Since the input
@@ -1594,7 +1673,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
 	/* Concatenate the modulus and the operands. Both the modulus and
 	 * the operands must be in little endian format. Since the input
@@ -1632,7 +1711,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
-	/* Set the first point Z coordianate to 1 */
+	/* Set the first point Z coordinate to 1 */
 	*src.address = 0x01;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
@@ -1651,7 +1730,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
 
-		/* Set the second point Z coordianate to 1 */
+		/* Set the second point Z coordinate to 1 */
 		*src.address = 0x01;
 		src.address += CCP_ECC_OPERAND_SIZE;
 	} else {
...
drivers/crypto/ccp/ccp-pci.c
@@ -141,10 +141,11 @@ static void ccp_free_irqs(struct ccp_device *ccp)
 			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
 				 dev);
 		pci_disable_msix(pdev);
-	} else {
+	} else if (ccp->irq) {
 		free_irq(ccp->irq, dev);
 		pci_disable_msi(pdev);
 	}
+	ccp->irq = 0;
 }
 
 static int ccp_find_mmio_area(struct ccp_device *ccp)
@@ -229,6 +230,8 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_set_drvdata(dev, ccp);
 
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
+
 	ret = ccp->vdata->perform->init(ccp);
 	if (ret)
 		goto e_iomap;
@@ -321,6 +324,7 @@ static int ccp_pci_resume(struct pci_dev *pdev)
 
 static const struct pci_device_id ccp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
+	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5 },
 	/* Last entry must be zero */
 	{ 0, }
 };
...
include/linux/ccp.h
@@ -238,9 +238,6 @@ struct ccp_xts_aes_engine {
 };
 
 /***** SHA engine *****/
-#define CCP_SHA_BLOCKSIZE		SHA256_BLOCK_SIZE
-#define CCP_SHA_CTXSIZE			SHA256_DIGEST_SIZE
-
 /**
  * ccp_sha_type - type of SHA operation
  *
...
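
With CCP_SHA_BLOCKSIZE and CCP_SHA_CTXSIZE gone, callers now size ctx_len by digest type. A hedged sketch of how a caller might fill the SHA portion of a ccp_cmd after this change (the field names are those used in the diffs above; the helper and its surrounding setup are illustrative, and submission would go through the ccp_enqueue_cmd() API declared in this header):

#include <linux/ccp.h>
#include <linux/string.h>
#include <crypto/sha.h>

static void example_fill_sha256(struct ccp_cmd *cmd,
				struct scatterlist *ctx_sg,
				struct scatterlist *src, u64 src_len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_SHA;
	cmd->u.sha.type = CCP_SHA_TYPE_256;
	cmd->u.sha.ctx = ctx_sg;
	/* Sized per digest now that the fixed CCP_SHA_CTXSIZE is gone */
	cmd->u.sha.ctx_len = SHA256_DIGEST_SIZE;
	cmd->u.sha.src = src;
	cmd->u.sha.src_len = src_len;
	cmd->u.sha.first = 1;
	cmd->u.sha.final = 1;
	cmd->u.sha.msg_bits = src_len << 3;	/* total message length in bits */
}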