Unverified commit 723ccb6d, authored by openeuler-ci-bot, committed by Gitee

!1009 [sync] PR-980: crypto: hisilicon - fix mailbox operation process

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/980 
 
The mailbox of the Kunpeng accelerator is a special operation: 128 bits of
data must be read from or written to the hardware in a single access, and
the operation cannot be cancelled by software once started. The software
flow is therefore reworked to avoid mailbox operation errors.
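
For orientation, the corrected flow distilled from the diff below — a minimal
sketch, where mailbox_xfer is an illustrative name for what the patches call
qm_mb(), and the helpers are the ones this series adds:

/*
 * Sketch of the reworked mailbox flow: the hardware moves all 128 bits
 * atomically and the operation cannot be cancelled, so software only
 * ever polls the busy bit by re-reading the whole 128-bit mailbox.
 */
static int mailbox_xfer(struct hisi_qm *qm, struct qm_mailbox *mb)
{
    int ret;

    mutex_lock(&qm->mailbox_lock);

    /* Wait for the previous operation to release the busy bit. */
    ret = qm_wait_mb_ready(qm);
    if (ret)
        goto unlock;

    /* Write all 128 bits at once (stp + dmb on arm64) to trigger the op. */
    qm_mb_write(qm, mb);

    /* Re-read the mailbox until busy clears, then check the status field. */
    ret = qm_wait_mb_finish(qm, mb);

unlock:
    mutex_unlock(&qm->mailbox_lock);
    return ret;
}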

Weili Qian (4):
  crypto: hisilicon/qm - obtain the mailbox configuration at one time
  vfio/migration: obtain the mailbox configuration at one time
  crypto: hisilicon/qm - fix the pf2vf timeout when device reset
  crypto: hisilicon/qm - alloc buffer to set and get xqc

issue: https://gitee.com/openeuler/kernel/issues/I7BJW9 
 
Link: https://gitee.com/openeuler/kernel/pulls/1009

Reviewed-by: Yang Shen <shenyang39@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
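
The qm.c side of this series (the collapsed diff further below) introduces
qm_set_and_get_xqc() together with the reserved struct qm_rsv_buf DMA buffers
visible in the header hunks. The following is an assumed sketch of its
contract, reconstructed only from the declarations shown in this diff, not
from the collapsed implementation; qm_mb_nolock is assumed to be qm.c's
lock-free mailbox helper, and the EQC/AEQC cases are analogous to the two
shown:

static int qm_set_and_get_xqc_sketch(struct hisi_qm *qm, u8 cmd, void *xqc,
                     u32 qp_id, bool op)
{
    struct qm_mailbox mailbox;
    dma_addr_t xqc_dma;
    void *tmp_xqc;
    size_t size;
    int ret;

    /* Pick the pre-allocated, DMA-able staging buffer for this XQC type. */
    switch (cmd) {
    case QM_MB_CMD_SQC:
        size = sizeof(struct qm_sqc);
        tmp_xqc = qm->xqc_buf.sqc;
        xqc_dma = qm->xqc_buf.sqc_dma;
        break;
    case QM_MB_CMD_CQC:
        size = sizeof(struct qm_cqc);
        tmp_xqc = qm->xqc_buf.cqc;
        xqc_dma = qm->xqc_buf.cqc_dma;
        break;
    default:
        return -EINVAL;
    }

    /* The mailbox lock also serializes use of the shared staging buffer. */
    mutex_lock(&qm->mailbox_lock);
    if (!op)                            /* set: stage the caller's data */
        memcpy(tmp_xqc, xqc, size);

    qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
    ret = qm_mb_nolock(qm, &mailbox);   /* assumed lock-free variant */
    if (!ret && op)                     /* get: copy the result back */
        memcpy(xqc, tmp_xqc, size);
    mutex_unlock(&qm->mailbox_lock);

    return ret;
}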
@@ -137,8 +137,8 @@ static void dump_show(struct hisi_qm *qm, void *info,
 static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
     struct device *dev = &qm->pdev->dev;
-    struct qm_sqc *sqc, *sqc_curr;
-    dma_addr_t sqc_dma;
+    struct qm_sqc *sqc_curr;
+    struct qm_sqc sqc;
     u32 qp_id;
     int ret;
@@ -151,35 +151,29 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
         return -EINVAL;
     }
 
-    sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
-    if (IS_ERR(sqc))
-        return PTR_ERR(sqc);
-
-    ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
-    if (ret) {
-        down_read(&qm->qps_lock);
-        if (qm->sqc) {
-            sqc_curr = qm->sqc + qp_id;
-
-            dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
-        }
-        up_read(&qm->qps_lock);
-
-        goto free_ctx;
+    ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
+    if (!ret) {
+        dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
+        return 0;
     }
 
-    dump_show(qm, sqc, sizeof(*sqc), name);
+    down_read(&qm->qps_lock);
+    if (qm->sqc) {
+        sqc_curr = qm->sqc + qp_id;
 
-free_ctx:
-    hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
+        dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
+    }
+    up_read(&qm->qps_lock);
+
     return 0;
 }
 
 static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
     struct device *dev = &qm->pdev->dev;
-    struct qm_cqc *cqc, *cqc_curr;
-    dma_addr_t cqc_dma;
+    struct qm_cqc *cqc_curr;
+    struct qm_cqc cqc;
     u32 qp_id;
     int ret;
@@ -192,34 +186,29 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
         return -EINVAL;
     }
 
-    cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
-    if (IS_ERR(cqc))
-        return PTR_ERR(cqc);
-
-    ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
-    if (ret) {
-        down_read(&qm->qps_lock);
-        if (qm->cqc) {
-            cqc_curr = qm->cqc + qp_id;
-
-            dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
-        }
-        up_read(&qm->qps_lock);
-
-        goto free_ctx;
+    ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
+    if (!ret) {
+        dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
+        return 0;
     }
 
-    dump_show(qm, cqc, sizeof(*cqc), name);
+    down_read(&qm->qps_lock);
+    if (qm->cqc) {
+        cqc_curr = qm->cqc + qp_id;
 
-free_ctx:
-    hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
+        dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
+    }
+    up_read(&qm->qps_lock);
+
     return 0;
 }
 
 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
 {
     struct device *dev = &qm->pdev->dev;
-    dma_addr_t xeqc_dma;
+    struct qm_aeqc aeqc;
+    struct qm_eqc eqc;
     size_t size;
     void *xeqc;
     int ret;
@@ -233,23 +222,19 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
     if (!strcmp(name, "EQC")) {
         cmd = QM_MB_CMD_EQC;
         size = sizeof(struct qm_eqc);
+        xeqc = &eqc;
     } else {
         cmd = QM_MB_CMD_AEQC;
         size = sizeof(struct qm_aeqc);
+        xeqc = &aeqc;
     }
 
-    xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
-    if (IS_ERR(xeqc))
-        return PTR_ERR(xeqc);
-
-    ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+    ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1);
     if (ret)
-        goto err_free_ctx;
+        return ret;
 
     dump_show(qm, xeqc, size, name);
 
-err_free_ctx:
-    hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
     return ret;
 }
......
@@ -18,16 +18,6 @@
 static struct dentry *mig_debugfs_root;
 static int mig_root_ref;
 
-/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
-static int qm_wait_mb_ready(struct hisi_qm *qm)
-{
-    u32 val;
-
-    return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
-                      val, !((val >> QM_MB_BUSY_SHIFT) &
-                      0x1), POLL_PERIOD, POLL_TIMEOUT);
-}
-
 /* return 0 VM acc device ready, -ETIMEDOUT hardware timeout */
 static int qm_wait_dev_ready(struct hisi_qm *qm)
 {
@@ -37,7 +27,6 @@ static int qm_wait_dev_ready(struct hisi_qm *qm)
                       val, !(val & 0x1), POLL_PERIOD, POLL_TIMEOUT);
 }
 
-/* 128 bit should be written to hardware at one time to trigger a mailbox */
 static void qm_mb_write(struct hisi_qm *qm, const void *src)
 {
@@ -61,57 +50,129 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
              : "memory");
 }
 
+/* 128 bit should be read from hardware at one time */
+static void qm_mb_read(struct hisi_qm *qm, void *dst)
+{
+    const void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
+    unsigned long tmp0 = 0, tmp1 = 0;
+
+    if (!IS_ENABLED(CONFIG_ARM64)) {
+        memcpy_fromio(dst, fun_base, 16);
+        dma_wmb();
+        return;
+    }
+
+    asm volatile("ldp %0, %1, %3\n"
+             "stp %0, %1, %2\n"
+             "dmb oshst\n"
+             : "=&r" (tmp0),
+               "=&r" (tmp1),
+               "+Q" (*((char *)dst))
+             : "Q" (*((char __iomem *)fun_base))
+             : "memory");
+}
+
 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
-               u16 queue, bool op)
+               u64 base, u16 queue, bool op)
 {
-    mailbox->w0 = cpu_to_le16(cmd |
-              (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
-              (0x1 << QM_MB_BUSY_SHIFT));
+    mailbox->w0 = cpu_to_le16((cmd) |
+              ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
+              (0x1 << QM_MB_BUSY_SHIFT));
     mailbox->queue_num = cpu_to_le16(queue);
+    mailbox->base_l = cpu_to_le32(lower_32_bits(base));
+    mailbox->base_h = cpu_to_le32(upper_32_bits(base));
     mailbox->rsvd = 0;
 }
 
-static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+static int qm_wait_mb_ready(struct hisi_qm *qm)
 {
-    int cnt = 0;
+    struct qm_mailbox mailbox;
+    int i = 0;
 
-    if (unlikely(qm_wait_mb_ready(qm))) {
-        dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
-        return -EBUSY;
+    while (i++ < QM_MB_WAIT_READY_CNT) {
+        qm_mb_read(qm, &mailbox);
+        if (!((le16_to_cpu(mailbox.w0) >> QM_MB_BUSY_SHIFT) & 0x1))
+            return 0;
+
+        usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
     }
 
-    qm_mb_write(qm, mailbox);
+    dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
 
-    while (true) {
-        if (!qm_wait_mb_ready(qm))
-            break;
-        if (++cnt > QM_MB_MAX_WAIT_CNT) {
+    return -EBUSY;
+}
+
+static int qm_wait_mb_finish(struct hisi_qm *qm, struct qm_mailbox *mailbox)
+{
+    int i = 0;
+
+    while (++i) {
+        qm_mb_read(qm, mailbox);
+        if (!((le16_to_cpu(mailbox->w0) >> QM_MB_BUSY_SHIFT) & 0x1))
+            break;
+
+        if (i == QM_MB_MAX_WAIT_CNT) {
             dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
-            return -EBUSY;
+            return -ETIMEDOUT;
         }
+
+        usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
+    }
+
+    if (le16_to_cpu(mailbox->w0) & QM_MB_STATUS_MASK) {
+        dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+        return -EIO;
     }
 
     return 0;
 }
 
-static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
-         bool op)
+static int qm_mb(struct hisi_qm *qm, struct qm_mailbox *mailbox)
 {
-    struct qm_mailbox mailbox;
     int ret;
 
-    dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
-        queue, cmd, (unsigned long long)dma_addr);
+    mutex_lock(&qm->mailbox_lock);
+    ret = qm_wait_mb_ready(qm);
+    if (ret)
+        goto unlock;
 
-    qm_mb_pre_init(&mailbox, cmd, queue, op);
-    mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
-    mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
+    qm_mb_write(qm, mailbox);
+    ret = qm_wait_mb_finish(qm, mailbox);
 
-    mutex_lock(&qm->mailbox_lock);
-    ret = qm_mb_nolock(qm, &mailbox);
+unlock:
     mutex_unlock(&qm->mailbox_lock);
 
     return ret;
 }
 
+static int qm_config_set(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr,
+             u16 queue, bool op)
+{
+    struct qm_mailbox mailbox;
+
+    dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-0x%llx\n",
+        queue, cmd, (unsigned long long)dma_addr);
+
+    qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
+
+    return qm_mb(qm, &mailbox);
+}
+
+static int qm_config_get(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
+{
+    struct qm_mailbox mailbox;
+    int ret;
+
+    qm_mb_pre_init(&mailbox, cmd, 0, queue, 1);
+    ret = qm_mb(qm, &mailbox);
+    if (ret)
+        return ret;
+
+    *base = le32_to_cpu(mailbox.base_l) |
+        ((u64)le32_to_cpu(mailbox.base_h) << 32);
+
+    return 0;
+}
+
 /*
  * Each state Reg is checked 100 times,
  * with a delay of 100 microseconds after each check
@@ -230,13 +291,10 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
     u64 sqc_vft;
     int ret;
 
-    ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
+    ret = qm_config_get(qm, &sqc_vft, QM_MB_CMD_SQC_VFT_V2, 0);
     if (ret)
         return ret;
 
-    sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-          ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-          QM_XQC_ADDR_OFFSET);
     *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
     *number = (QM_SQC_VFT_NUM_MASK_V2 &
            (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
@@ -244,36 +302,6 @@ static int qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
     return 0;
 }
 
-static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
-{
-    int ret;
-
-    ret = qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
-    if (ret)
-        return ret;
-
-    *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-        ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-        QM_XQC_ADDR_OFFSET);
-
-    return 0;
-}
-
-static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
-{
-    int ret;
-
-    ret = qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
-    if (ret)
-        return ret;
-
-    *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
-        ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
-        QM_XQC_ADDR_OFFSET);
-
-    return 0;
-}
-
 static int qm_rw_regs_read(struct hisi_qm *qm, struct acc_vf_data *vf_data)
 {
     struct device *dev = &qm->pdev->dev;
@@ -441,13 +469,13 @@ static int vf_migration_data_store(struct hisi_qm *qm,
     vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
 
     /* Through SQC_BT/CQC_BT to get sqc and cqc address */
-    ret = qm_get_sqc(qm, &vf_data->sqc_dma);
+    ret = qm_config_get(qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0);
     if (ret) {
         dev_err(dev, "failed to read SQC addr!\n");
         return -EINVAL;
     }
 
-    ret = qm_get_cqc(qm, &vf_data->cqc_dma);
+    ret = qm_config_get(qm, &vf_data->cqc_dma, QM_MB_CMD_CQC_BT, 0);
     if (ret) {
         dev_err(dev, "failed to read CQC addr!\n");
         return -EINVAL;
@@ -569,13 +597,13 @@ static int vf_migration_data_recover(struct hisi_qm *qm,
         return ret;
     }
 
-    ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
+    ret = qm_config_set(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
     if (ret) {
         dev_err(dev, "Set sqc failed!\n");
         return ret;
     }
 
-    ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
+    ret = qm_config_set(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
     if (ret) {
         dev_err(dev, "Set cqc failed!\n");
         return ret;
@@ -604,7 +632,7 @@ static int vf_qm_cache_wb(struct hisi_qm *qm)
 
 static int vf_qm_func_stop(struct hisi_qm *qm)
 {
-    return qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
+    return qm_config_set(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
 }
 
 static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id,
......
@@ -58,9 +58,11 @@
 #define QM_MB_CMD_SEND_BASE        0x300
 #define QM_MB_BUSY_SHIFT        13
 #define QM_MB_OP_SHIFT            14
-#define QM_MB_CMD_DATA_ADDR_L        0x304
-#define QM_MB_CMD_DATA_ADDR_H        0x308
-#define QM_MB_MAX_WAIT_CNT        6000
+#define QM_MB_WAIT_READY_CNT        10
+#define QM_MB_MAX_WAIT_CNT        3000
+#define WAIT_PERIOD_US_MIN        100
+#define WAIT_PERIOD_US_MAX        200
+#define QM_MB_STATUS_MASK        GENMASK(12, 9)
 
 /* doorbell */
 #define QM_DOORBELL_CMD_SQ        0
......
This diff has been collapsed.
@@ -76,10 +76,7 @@ static const char * const qm_s[] = {
     "init", "start", "close", "stop",
 };
 
-void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
-            dma_addr_t *dma_addr);
-void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
-              const void *ctx_addr, dma_addr_t *dma_addr);
+int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
 void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
 void hisi_qm_set_algqos_init(struct hisi_qm *qm);
......
@@ -53,7 +53,6 @@
 #define QM_MB_OP_SHIFT            14
 #define QM_MB_CMD_DATA_ADDR_L        0x304
 #define QM_MB_CMD_DATA_ADDR_H        0x308
-#define QM_MB_MAX_WAIT_CNT        6000
 
 /* doorbell */
 #define QM_DOORBELL_CMD_SQ        0
@@ -319,6 +318,18 @@ struct qm_err_isolate {
     struct list_head qm_hw_errs;
 };
 
+struct qm_rsv_buf {
+    struct qm_sqc *sqc;
+    struct qm_cqc *cqc;
+    struct qm_eqc *eqc;
+    struct qm_aeqc *aeqc;
+    dma_addr_t sqc_dma;
+    dma_addr_t cqc_dma;
+    dma_addr_t eqc_dma;
+    dma_addr_t aeqc_dma;
+    struct qm_dma qcdma;
+};
+
 struct hisi_qm {
     enum qm_hw_ver ver;
     enum qm_fun_type fun_type;
@@ -351,6 +362,7 @@ struct hisi_qm {
     dma_addr_t cqc_dma;
     dma_addr_t eqe_dma;
     dma_addr_t aeqe_dma;
+    struct qm_rsv_buf xqc_buf;
 
     struct hisi_qm_status status;
     const struct hisi_qm_err_ini *err_ini;
@@ -535,10 +547,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
 void hisi_qm_reset_prepare(struct pci_dev *pdev);
 void hisi_qm_reset_done(struct pci_dev *pdev);
 
-int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
-int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
-           bool op);
-
 struct hisi_acc_sgl_pool;
 struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
     struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
......