Commit 5df90d74 authored by Yu'an Wang, committed by Yang Yingliang

acc: Remove uacce mode 1 logic under hisilicon

driver inclusion
category: feature
bugzilla: NA
CVE: NA

In this patch, we update the uacce mode logic.
In qm.c, we remove the use_dma_api judgment branches, because the
value of use_dma_api is now always true.
In hpre_main.c, sec_main.c, rde_main.c and zip_main.c, we update the
related uacce_mode handling to match the changes in uacce.c and qm.c.
Signed-off-by: Yu'an Wang <wangyuan46@huawei.com>
Reviewed-by: Hui Tang <tanghui20@huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent cfaa810b
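Before the diff, a minimal stand-alone sketch of the mode handling this
patch converges on. The UACCE_MODE_* values and the use_uacce field follow
the driver; struct qm_cfg, uacce_mode_check() and qm_pre_init_mode() are
hypothetical stand-ins for illustration, not the kernel's API.

#include <stdbool.h>

enum {
	UACCE_MODE_NOUACCE = 0,	/* no uacce: kernel crypto only */
	UACCE_MODE_NOIOMMU = 2,	/* uacce in no-IOMMU (pass-through) mode */
};

struct qm_cfg {
	bool use_uacce;
	/* use_dma_api is gone: the DMA API is now used unconditionally */
};

/* Mirrors the new uacce_mode_set() check: mode 1 is no longer accepted. */
static int uacce_mode_check(unsigned int n)
{
	if (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

/* Mirrors the slimmed-down switch in the hpre/sec/rde/zip pre-init paths. */
static int qm_pre_init_mode(struct qm_cfg *qm, int uacce_mode)
{
	switch (uacce_mode) {
	case UACCE_MODE_NOUACCE:
		qm->use_uacce = false;
		break;
	case UACCE_MODE_NOIOMMU:
		qm->use_uacce = true;
		break;
	default:
		return -1;
	}
	return 0;
}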
@@ -227,7 +227,7 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 		return -EINVAL;
 
 	ret = kstrtou32(val, 10, &n);
-	if (ret != 0 || n > UACCE_MODE_NOIOMMU)
+	if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
 		return -EINVAL;
 
 	return param_set_int(val, kp);
@@ -244,7 +244,7 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
 
 static inline void hpre_add_to_list(struct hpre *hpre)
 {
 	mutex_lock(&hpre_list_lock);
@@ -588,7 +588,7 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
 		return -EINVAL;
 	}
 	spin_unlock_irq(&file->lock);
-	ret = sprintf(tbuf, "%u\n", val);
+	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
 
 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
 }
@@ -819,20 +819,9 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 	qm->algs = "rsa\ndh\n";
 	switch (uacce_mode) {
 	case UACCE_MODE_NOUACCE:
-		qm->use_dma_api = true;
 		qm->use_uacce = false;
 		break;
-	case UACCE_MODE_UACCE:
-#ifdef CONFIG_IOMMU_SVA
-		qm->use_dma_api = true;
-		qm->use_sva = true;
-#else
-		qm->use_dma_api = false;
-#endif
-		qm->use_uacce = true;
-		break;
 	case UACCE_MODE_NOIOMMU:
-		qm->use_dma_api = true;
 		qm->use_uacce = true;
 		break;
 	default:
......
@@ -905,7 +905,7 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 		return -EINVAL;
 	}
 	mutex_unlock(&file->lock);
-	ret = sprintf(tbuf, "%u\n", val);
+	ret = snprintf(tbuf, TEMPBUFFER_LEN, "%u\n", val);
 
 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
 }
@@ -1205,23 +1205,21 @@ static struct hisi_qp *hisi_qm_create_qp_nolock(struct hisi_qm *qm,
 	qp->qm = qm;
 
 	/* allocate qp dma memory, uacce uses dus region for this */
-	if (qm->use_dma_api) {
-		qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
-				sizeof(struct cqe) * QM_Q_DEPTH;
-		/* one more page for device or qp statuses */
-		qp->qdma.size = PAGE_ALIGN(qp->qdma.size) + PAGE_SIZE;
-		qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
-						 &qp->qdma.dma,
-						 GFP_KERNEL);
-		if (!qp->qdma.va) {
-			ret = -ENOMEM;
-			goto err_clear_bit;
-		}
-
-		dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
-			qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
-	}
+	qp->qdma.size = qm->sqe_size * QM_Q_DEPTH +
+			sizeof(struct cqe) * QM_Q_DEPTH;
+	/* one more page for device or qp statuses */
+	qp->qdma.size = PAGE_ALIGN(qp->qdma.size) + PAGE_SIZE;
+	qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size,
+					 &qp->qdma.dma,
+					 GFP_KERNEL);
+	if (!qp->qdma.va) {
+		ret = -ENOMEM;
+		goto err_clear_bit;
+	}
+
+	dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n",
+		qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
 
 	qp->qp_id = qp_id;
 	qp->alg_type = alg_type;
 	qp->c_flag = 1;
@@ -1276,7 +1274,7 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
 		up_write(&qm->qps_lock);
 		return;
 	}
-	if (qm->use_dma_api && qdma->va)
+	if (qdma->va)
 		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
 
 	dev_dbg(dev, "release qp %d\n", qp->qp_id);
@@ -1297,19 +1295,14 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
 	dma_addr_t sqc_dma;
 	int ret;
 
-	if (qm->use_dma_api) {
-		sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
-		if (!sqc)
-			return -ENOMEM;
-		sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, sqc_dma)) {
-			kfree(sqc);
-			return -ENOMEM;
-		}
-	} else {
-		sqc = qm->reserve;
-		sqc_dma = qm->reserve_dma;
-	}
+	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
+	if (!sqc)
+		return -ENOMEM;
+	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, sqc_dma)) {
+		kfree(sqc);
+		return -ENOMEM;
+	}
 
 	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
@@ -1324,13 +1317,8 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
 	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
 
 	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
-	if (qm->use_dma_api) {
-		dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc),
-				 DMA_TO_DEVICE);
-		kfree(sqc);
-	} else {
-		memset(sqc, 0, sizeof(struct qm_sqc));
-	}
+	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
+	kfree(sqc);
 
 	return ret;
 }
@@ -1344,20 +1332,15 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
 	dma_addr_t cqc_dma;
 	int ret;
 
-	if (qm->use_dma_api) {
-		cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
-		if (!cqc)
-			return -ENOMEM;
-
-		cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, cqc_dma)) {
-			kfree(cqc);
-			return -ENOMEM;
-		}
-	} else {
-		cqc = qm->reserve;
-		cqc_dma = qm->reserve_dma;
-	}
+	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
+	if (!cqc)
+		return -ENOMEM;
+
+	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, cqc_dma)) {
+		kfree(cqc);
+		return -ENOMEM;
+	}
 
 	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
@@ -1373,13 +1356,8 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
 			    qp->c_flag << QM_CQ_FLAG_SHIFT);
 
 	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
-	if (qm->use_dma_api) {
-		dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc),
-				 DMA_TO_DEVICE);
-		kfree(cqc);
-	} else {
-		memset(cqc, 0, sizeof(struct qm_cqc));
-	}
+	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
+	kfree(cqc);
 
 	return ret;
 }
@@ -1614,9 +1592,6 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
 			ret++;
 	up_read(&qm->qps_lock);
 
-	if (!qm->use_dma_api)
-		ret = (ret == qm->qp_num) ? 1 : 0;
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
@@ -1729,25 +1704,21 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 			       qm->phys_base >> PAGE_SHIFT,
 			       sz, pgprot_noncached(vma->vm_page_prot));
 	case UACCE_QFRT_DUS:
-		if (qm->use_dma_api) {
-			if (sz != qp->qdma.size) {
-				dev_err(dev, "wrong queue size %ld vs %ld\n",
-					sz, qp->qdma.size);
-				return -EINVAL;
-			}
-
-			/* dma_mmap_coherent() requires vm_pgoff as 0
-			 * restore vm_pfoff to initial value for mmap()
-			 */
-			vm_pgoff = vma->vm_pgoff;
-			vma->vm_pgoff = 0;
-			ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
-						qp->qdma.dma, sz);
-			vma->vm_pgoff = vm_pgoff;
-			return ret;
-		}
-		return -EINVAL;
+		if (sz != qp->qdma.size) {
+			dev_err(dev, "wrong queue size %ld vs %ld\n",
+				sz, qp->qdma.size);
+			return -EINVAL;
+		}
+
+		/* dma_mmap_coherent() requires vm_pgoff as 0
+		 * restore vm_pfoff to initial value for mmap()
+		 */
+		vm_pgoff = vma->vm_pgoff;
+		vma->vm_pgoff = 0;
+		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
+					qp->qdma.dma, sz);
+		vma->vm_pgoff = vm_pgoff;
+		return ret;
 
 	default:
 		return -EINVAL;
 	}
@@ -1755,58 +1726,16 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 
 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
 {
-	int ret;
-	struct hisi_qm *qm = q->uacce->priv;
 	struct hisi_qp *qp = q->priv;
 
-	dev_dbg(&q->uacce->dev, "uacce queue start\n");
-
-	/* without SVA, iommu api should be called after user mmap dko */
-	if (!qm->use_dma_api) {
-		qm->qdma.dma = q->qfrs[UACCE_QFRT_DKO]->iova;
-		qm->qdma.va = q->qfrs[UACCE_QFRT_DKO]->kaddr;
-		qm->qdma.size = q->qfrs[UACCE_QFRT_DKO]->nr_pages >> PAGE_SHIFT;
-		dev_dbg(&q->uacce->dev,
-			"use dko space: va=%pK, dma=%lx, size=%llx\n",
-			qm->qdma.va, (unsigned long)qm->qdma.dma,
-			qm->size);
-		ret = __hisi_qm_start(qm);
-		if (ret)
-			return ret;
-
-		qp->qdma.dma = q->qfrs[UACCE_QFRT_DUS]->iova;
-		qp->qdma.va = q->qfrs[UACCE_QFRT_DUS]->kaddr;
-		qp->qdma.size = q->qfrs[UACCE_QFRT_DUS]->nr_pages >> PAGE_SHIFT;
-	}
-
-	ret = hisi_qm_start_qp(qp, qp->pasid);
-	if (ret && !qm->use_dma_api) {
-		ret = hisi_qm_stop(qm, QM_NORMAL);
-		if (ret) {
-			dev_dbg(&q->uacce->dev, "Stop qm failed!\n");
-			return ret;
-		}
-	}
-
-	return ret;
+	return hisi_qm_start_qp(qp, qp->pasid);
 }
 
 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
 {
-	struct hisi_qm *qm = q->uacce->priv;
 	struct hisi_qp *qp = q->priv;
 
 	hisi_qm_stop_qp(qp);
-
-	if (!qm->use_dma_api) {
-		/*
-		 * In uacce_mode=1, we flush qm sqc here.
-		 * In uacce_fops_release, the working flow is stop_queue ->
-		 * unmap memory -> put_queue. Before unmapping memory, we
-		 * should flush sqc back to memory.
-		 */
-		hisi_qm_cache_wb(qm);
-	}
 }
 
 static int qm_set_sqctype(struct uacce_queue *q, u16 type)
@@ -1910,24 +1839,16 @@ static int qm_register_uacce(struct hisi_qm *qm)
 	else
 		uacce->api_ver = HISI_QM_API_VER2_BASE;
 
-	if (qm->use_dma_api) {
-		/*
-		 * Noiommu, SVA, and crypto-only modes are all using dma api.
-		 * So we don't use uacce to allocate memory. We allocate it
-		 * by ourself with the UACCE_DEV_DRVMAP_DUS flag.
-		 */
-		if (qm->use_sva) {
-			uacce->flags = UACCE_DEV_SVA | UACCE_DEV_DRVMAP_DUS;
-		} else {
-			uacce->flags = UACCE_DEV_NOIOMMU |
				       UACCE_DEV_DRVMAP_DUS;
-			if (qm->ver == QM_HW_V1)
-				uacce->api_ver = HISI_QM_API_VER_BASE
-						 UACCE_API_VER_NOIOMMU_SUBFIX;
-			else
-				uacce->api_ver = HISI_QM_API_VER2_BASE
-						 UACCE_API_VER_NOIOMMU_SUBFIX;
-		}
+	if (qm->use_sva) {
+		uacce->flags = UACCE_DEV_SVA;
+	} else {
+		uacce->flags = UACCE_DEV_NOIOMMU;
+		if (qm->ver == QM_HW_V1)
+			uacce->api_ver = HISI_QM_API_VER_BASE
+					 UACCE_API_VER_NOIOMMU_SUBFIX;
+		else
+			uacce->api_ver = HISI_QM_API_VER2_BASE
+					 UACCE_API_VER_NOIOMMU_SUBFIX;
 	}
 
 	for (i = 0; i < UACCE_QFRT_MAX; i++)
@@ -2031,9 +1952,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 	atomic_set(&qm->status.flags, QM_INIT);
 	INIT_WORK(&qm->work, qm_work_process);
 
-	dev_dbg(dev, "init qm %s with %s\n",
-		pdev->is_physfn ? "pf" : "vf",
-		qm->use_dma_api ? "dma api" : "iommu api");
+	dev_dbg(dev, "init qm %s\n", pdev->is_physfn ? "pf" : "vf");
 
 	return 0;
@@ -2072,7 +1991,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 		return;
 	}
 
 	/* qm hardware buffer free on put_queue if no dma api */
-	if (qm->use_dma_api && qm->qdma.va) {
+	if (qm->qdma.va) {
 		hisi_qm_cache_wb(qm);
 		dma_free_coherent(dev, qm->qdma.size,
 				  qm->qdma.va, qm->qdma.dma);
@@ -2190,19 +2109,14 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 	dma_addr_t eqc_dma;
 	int ret;
 
-	if (qm->use_dma_api) {
-		eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
-		if (!eqc)
-			return -ENOMEM;
-		eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, eqc_dma)) {
-			kfree(eqc);
-			return -ENOMEM;
-		}
-	} else {
-		eqc = qm->reserve;
-		eqc_dma = qm->reserve_dma;
-	}
+	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
+	if (!eqc)
+		return -ENOMEM;
+	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, eqc_dma)) {
+		kfree(eqc);
+		return -ENOMEM;
+	}
 
 	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
@@ -2211,12 +2125,8 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
 	eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
 	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
-	if (qm->use_dma_api) {
-		dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc),
-				 DMA_TO_DEVICE);
-		kfree(eqc);
-	} else
-		memset(eqc, 0, sizeof(struct qm_eqc));
+	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
+	kfree(eqc);
 
 	return ret;
 }
@@ -2228,31 +2138,22 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
 	dma_addr_t aeqc_dma;
 	int ret;
 
-	if (qm->use_dma_api) {
-		aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
-		if (!aeqc)
-			return -ENOMEM;
-		aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
-					  DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, aeqc_dma)) {
-			kfree(aeqc);
-			return -ENOMEM;
-		}
-	} else {
-		aeqc = qm->reserve;
-		aeqc_dma = qm->reserve_dma;
-	}
+	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
+	if (!aeqc)
+		return -ENOMEM;
+	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, aeqc_dma)) {
+		kfree(aeqc);
+		return -ENOMEM;
+	}
 
 	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
 	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
 	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
 	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
-	if (qm->use_dma_api) {
-		dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc),
-				  DMA_TO_DEVICE);
-		kfree(aeqc);
-	} else
-		memset(aeqc, 0, sizeof(struct qm_aeqc));
+	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
+	kfree(aeqc);
 
 	return ret;
 }
@@ -2279,9 +2180,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
 	struct device *dev = &pdev->dev;
 	size_t off = 0;
 	int ret;
-#ifdef CONFIG_CRYPTO_QM_UACCE
-	size_t dko_size;
-#endif
 
 #define QM_INIT_BUF(qm, type, num) do { \
 	(qm)->type = ((qm)->qdma.va + (off)); \
@@ -2324,20 +2222,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
 		qm->sqc, (unsigned long)qm->sqc_dma,
 		qm->cqc, (unsigned long)qm->cqc_dma);
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
-	/* check if the size exceed the DKO boundary */
-	if (qm->use_uacce && !qm->use_dma_api) {
-		WARN_ON(qm->uacce.qf_pg_start[UACCE_QFRT_DKO] == UACCE_QFR_NA);
-		dko_size = qm->uacce.qf_pg_start[UACCE_QFRT_DUS] -
-			   qm->uacce.qf_pg_start[UACCE_QFRT_DKO];
-		dko_size <<= PAGE_SHIFT;
-		dev_dbg(&qm->pdev->dev,
-			"kernel-only buffer used (0x%lx/0x%lx)\n", off,
-			dko_size);
-		if (off > dko_size)
-			return -EINVAL;
-	}
-#endif
 	ret = qm_eq_aeq_ctx_cfg(qm);
 	if (ret)
 		return ret;
@@ -2445,20 +2329,11 @@ int hisi_qm_start(struct hisi_qm *qm)
 			       QM_V2_DOORBELL_OFFSET / PAGE_SIZE;
 	else
 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
-	if (qm->use_uacce && qm->use_dma_api) {
-		uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
-		uacce->qf_pg_start[UACCE_QFRT_DKO] = UACCE_QFR_NA;
-		uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr;
-		uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
-						    dus_page_nr;
-	} else if (qm->use_uacce) {
+
+	if (qm->use_uacce) {
 		uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
-		uacce->qf_pg_start[UACCE_QFRT_DKO] = mmio_page_nr;
-		uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr +
-						     dko_page_nr;
-		uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
-						    dko_page_nr +
-						    dus_page_nr;
+		uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr;
+		uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr + dus_page_nr;
 	}
 #endif
@@ -2474,16 +2349,7 @@ int hisi_qm_start(struct hisi_qm *qm)
 		}
 	}
 
-	if (!qm->use_dma_api) {
-		/*
-		 * without SVA, qm have to be started after user region is
-		 * mapped
-		 */
-		dev_dbg(&qm->pdev->dev, "qm delay start\n");
-		atomic_set(&qm->status.flags, QM_START);
-		up_write(&qm->qps_lock);
-		return 0;
-	} else if (!qm->qdma.va) {
+	if (!qm->qdma.va) {
 		qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
 				QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
 				QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
......
@@ -287,7 +287,6 @@ struct hisi_qm {
 	u32 msi_mask;
 
 	const char *algs;
-	bool use_dma_api; /* use dma or iommu api */
 	bool use_uacce; /* register to uacce */
 	bool use_sva;
......
@@ -250,7 +250,7 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 		return -EINVAL;
 
 	ret = kstrtou32(val, FORMAT_DECIMAL, &n);
-	if (ret != 0 || n > UACCE_MODE_NOIOMMU)
+	if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
 		return -EINVAL;
 
 	return param_set_int(val, kp);
@@ -268,7 +268,7 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
 
 static const struct pci_device_id hisi_rde_dev_ids[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)},
@@ -791,20 +791,9 @@ static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 
 	switch (uacce_mode) {
 	case UACCE_MODE_NOUACCE:
-		qm->use_dma_api = true;
 		qm->use_uacce = false;
 		break;
-	case UACCE_MODE_UACCE:
-#ifdef CONFIG_IOMMU_SVA
-		qm->use_dma_api = true;
-		qm->use_sva = true;
-#else
-		qm->use_dma_api = false;
-#endif
-		qm->use_uacce = true;
-		break;
 	case UACCE_MODE_NOIOMMU:
-		qm->use_dma_api = true;
 		qm->use_uacce = true;
 		break;
 	default:
......
@@ -313,7 +313,7 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 		return -EINVAL;
 
 	ret = kstrtou32(val, FORMAT_DECIMAL, &n);
-	if (ret != 0 || n > UACCE_MODE_NOIOMMU)
+	if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
 		return -EINVAL;
 
 	return param_set_int(val, kp);
@@ -405,7 +405,7 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
 
 static int ctx_q_num = CTX_Q_NUM_DEF;
 module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444);
@@ -1023,20 +1023,9 @@ static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 
 	switch (uacce_mode) {
 	case UACCE_MODE_NOUACCE:
-		qm->use_dma_api = true;
 		qm->use_uacce = false;
 		break;
-	case UACCE_MODE_UACCE:
-#ifdef CONFIG_IOMMU_SVA
-		qm->use_dma_api = true;
-		qm->use_sva = true;
-#else
-		qm->use_dma_api = false;
-#endif
-		qm->use_uacce = true;
-		break;
 	case UACCE_MODE_NOIOMMU:
-		qm->use_dma_api = true;
 		qm->use_uacce = true;
 		break;
 	default:
@@ -1783,10 +1772,6 @@ static int __init hisi_sec_init(void)
 		pr_err("Failed to register pci driver.\n");
 		goto err_pci;
 	}
-#ifndef CONFIG_IOMMU_SVA
-	if (uacce_mode == UACCE_MODE_UACCE)
-		return 0;
-#endif
 
 	if (list_empty(&hisi_sec_list)) {
 		pr_err("no device!\n");
@@ -1814,12 +1799,7 @@ static void __exit hisi_sec_exit(void)
 static void __exit hisi_sec_exit(void)
 {
-#ifndef CONFIG_IOMMU_SVA
-	if (uacce_mode != UACCE_MODE_UACCE)
-		hisi_sec_unregister_from_crypto(fusion_limit);
-#else
 	hisi_sec_unregister_from_crypto(fusion_limit);
-#endif
 	pci_unregister_driver(&hisi_sec_pci_driver);
 	hisi_sec_unregister_debugfs();
 	if (sec_wq)
......
@@ -331,7 +331,7 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 		return -EINVAL;
 
 	ret = kstrtou32(val, FORMAT_DECIMAL, &n);
-	if (ret != 0 || n > UACCE_MODE_NOIOMMU)
+	if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
 		return -EINVAL;
 
 	return param_set_int(val, kp);
@@ -348,7 +348,7 @@ MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
 
 static const struct pci_device_id hisi_zip_dev_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -580,7 +580,7 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf,
 		return -EINVAL;
 	}
 	spin_unlock(&file->lock);
-	ret = sprintf(tbuf, "%u\n", val);
+	ret = snprintf(tbuf, HZIP_BUF_SIZE, "%u\n", val);
 
 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
 }
@@ -867,21 +867,9 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	switch (uacce_mode) {
 	case UACCE_MODE_NOUACCE:
-		qm->use_dma_api = true;
 		qm->use_uacce = false;
 		break;
-	case UACCE_MODE_UACCE:
-#ifdef CONFIG_IOMMU_SVA
-		qm->use_dma_api = true;
-		qm->use_sva = true;
-#else
-		qm->use_dma_api = false;
-		qm->use_sva = false;
-#endif
-		qm->use_uacce = true;
-		break;
 	case UACCE_MODE_NOIOMMU:
-		qm->use_dma_api = true;
 		qm->use_uacce = true;
 		break;
 	default:
@@ -930,10 +918,6 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
 
-#ifndef CONFIG_IOMMU_SVA
-	if (uacce_mode == UACCE_MODE_UACCE)
-		return 0;
-#endif
 	ret = hisi_zip_register_to_crypto();
 	if (ret < 0) {
 		pr_err("Failed to register driver to crypto.\n");
@@ -1111,13 +1095,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
 	if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
 		(void)hisi_zip_sriov_disable(pdev);
 
-#ifndef CONFIG_IOMMU_SVA
-	if (uacce_mode != UACCE_MODE_UACCE)
-		hisi_zip_unregister_from_crypto();
-#else
 	hisi_zip_unregister_from_crypto();
-#endif
 	hisi_zip_debugfs_exit(hisi_zip);
 	hisi_qm_stop(qm, QM_NORMAL);
......