提交 8bf65d55 编写于 作者: M Mingqiang Ling 提交者: Xie XiuQi

ACC: Add state machine for uacce/QM

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

This patch adds state machine for uacce and QM. Related state machine and
lock design documents can be found:

https://github.com/hisilicon/dev-docs/blob/master/warpdrive/state_model.rst
https://github.com/hisilicon/dev-docs/blob/master/warpdrive/uacce_lock.rst

This patch also solves below problems:

- Remove uacce_queue pool in uacce; let the low level driver maintain the
  queue pool. So the locks (uacce_mutex, uacce->q_lock) for uacce are also
  removed.

- Provide an ioctl to put the hardware queue, since a hardware queue that is
  put in the uacce .release callback will have its put delayed by the kernel.

- Modify reset logic of uacce. The UACCE_ST_RST state of uacce has been
  deleted. The current logic is: before doing a hardware reset, a SIGIO is
  sent to each process which has been bound to a uacce_queue; the low level
  driver waits for the user to close all fds before doing uacce_unregister.
  After the hardware reset is done, the low level driver can register to the
  uacce subsystem again.

- Modify the way to send reset signal: use send_sig_info to send signal.

Known issue as comments of function uacce_send_sig_to_client:

"This function can be called in low level driver, which may bring a race
with uacce_fops_release. The problem is this function may be called
when q is NULL. Low level driver should avoid this by locking hardware
queue pool and check if there is related hardware queue before calling
this function."

Modify the sec/rde module code to adapt to the uacce/qm changes.
Signed-off-by: Ntanshukun (A) <tanshukun1@huawei.com>
Reviewed-by: Nwangzhou <wangzhou1@hisilicon.com>
Signed-off-by: NMingqiang Ling <lingmingqiang@huawei.com>
Signed-off-by: Nlingmingqiang <lingmingqiang@huawei.com>
Signed-off-by: NYang Yingliang <yangyingliang@huawei.com>
上级 f85b09a3
...@@ -23,10 +23,15 @@ ...@@ -23,10 +23,15 @@
struct hpre_ctrl; struct hpre_ctrl;
enum hpre_status {
HPRE_RESET,
};
struct hpre { struct hpre {
struct hisi_qm qm; struct hisi_qm qm;
struct list_head list; struct list_head list;
struct hpre_ctrl *ctrl; struct hpre_ctrl *ctrl;
unsigned long status;
}; };
enum hpre_alg_type { enum hpre_alg_type {
......
...@@ -526,6 +526,9 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, int is_exit) ...@@ -526,6 +526,9 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, int is_exit)
unsigned int sz = ctx->key_sz; unsigned int sz = ctx->key_sz;
struct device *dev = &GET_DEV(ctx); struct device *dev = &GET_DEV(ctx);
if (is_exit)
hisi_qm_stop_qp(ctx->qp);
if (ctx->dh.g) { if (ctx->dh.g) {
dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
ctx->dh.g = NULL; ctx->dh.g = NULL;
...@@ -878,6 +881,9 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, int is_exit) ...@@ -878,6 +881,9 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, int is_exit)
unsigned int half_key_sz = ctx->key_sz >> 1; unsigned int half_key_sz = ctx->key_sz >> 1;
struct device *dev = &GET_DEV(ctx); struct device *dev = &GET_DEV(ctx);
if (is_exit)
hisi_qm_stop_qp(ctx->qp);
if (ctx->rsa.pubkey) { if (ctx->rsa.pubkey) {
dma_free_coherent(dev, ctx->key_sz << 1, dma_free_coherent(dev, ctx->key_sz << 1,
ctx->rsa.pubkey, ctx->rsa.dma_pubkey); ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
......
...@@ -1078,26 +1078,96 @@ static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev, ...@@ -1078,26 +1078,96 @@ static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
return hpre_process_hw_error(pdev); return hpre_process_hw_error(pdev);
} }
static int hpre_reset_prepare_rdy(struct hpre *hpre)
{
int delay = 1;
u32 flag = 1;
int ret = 0;
#define TIMEOUT_VF 20000
while (flag) {
flag = 0;
if (delay > TIMEOUT_VF) {
ret = -EBUSY;
break;
}
msleep(delay);
delay *= 2;
if (test_and_set_bit(HPRE_RESET, &hpre->status))
flag = 1;
}
return ret;
}
static int hpre_vf_reset_prepare(struct pci_dev *pdev,
enum qm_stop_reason stop_reason)
{
struct pci_dev *dev;
struct hisi_qm *qm;
struct hpre *hpre;
int ret = 0;
mutex_lock(&hpre_list_lock);
if (pdev->is_physfn) {
list_for_each_entry(hpre, &hpre_list, list) {
dev = hpre->qm.pdev;
if (dev == pdev)
continue;
if (pci_physfn(dev) == pdev) {
qm = &hpre->qm;
ret = hisi_qm_stop(qm, stop_reason);
if (ret)
goto prepare_fail;
}
}
}
prepare_fail:
mutex_unlock(&hpre_list_lock);
return ret;
}
static int hpre_controller_reset_prepare(struct hpre *hpre) static int hpre_controller_reset_prepare(struct hpre *hpre)
{ {
struct hisi_qm *qm = &hpre->qm; struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
int retry = 0;
int ret; int ret;
if (test_and_set_bit(QM_RESET, &qm->status.flags)) { ret = hpre_reset_prepare_rdy(hpre);
dev_warn(&pdev->dev, "Failed to set reset flag!"); if (ret) {
return -EBUSY; dev_err(&pdev->dev, "Controller reset not ready!\n");
return ret;
}
ret = hpre_vf_reset_prepare(pdev, QM_SOFT_RESET);
if (ret) {
dev_err(&pdev->dev, "Fails to stop VFs!\n");
return ret;
} }
ret = hisi_qm_stop(qm); ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Fails to stop QM!\n");
return ret; return ret;
} }
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) /* wait 10s for uacce_queue to release */
uacce_reset_prepare(&qm->uacce); while (retry++ < 1000) {
msleep(20);
if (!uacce_unregister(&qm->uacce))
break;
if (retry == 1000)
return -EBUSY;
}
#endif #endif
return 0; return 0;
...@@ -1160,42 +1230,67 @@ static int hpre_soft_reset(struct hpre *hpre) ...@@ -1160,42 +1230,67 @@ static int hpre_soft_reset(struct hpre *hpre)
return 0; return 0;
} }
static int hpre_vf_reset_done(struct pci_dev *pdev)
{
struct pci_dev *dev;
struct hisi_qm *qm;
struct hpre *hpre;
int ret = 0;
mutex_lock(&hpre_list_lock);
list_for_each_entry(hpre, &hpre_list, list) {
dev = hpre->qm.pdev;
if (dev == pdev)
continue;
if (pci_physfn(dev) == pdev) {
qm = &hpre->qm;
hisi_qm_clear_queues(qm);
ret = hisi_qm_restart(qm);
if (ret)
goto reset_fail;
}
}
reset_fail:
mutex_unlock(&hpre_list_lock);
return ret;
}
static int hpre_controller_reset_done(struct hpre *hpre) static int hpre_controller_reset_done(struct hpre *hpre)
{ {
struct hisi_qm *qm = &hpre->qm; struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
struct hisi_qp *qp; int ret;
int i, ret;
hisi_qm_clear_queues(qm); hisi_qm_clear_queues(qm);
ret = hpre_set_user_domain_and_cache(hpre); ret = hpre_set_user_domain_and_cache(hpre);
if (ret) if (ret)
return ret; return ret;
hpre_hw_err_init(hpre); hpre_hw_err_init(hpre);
ret = hisi_qm_start(qm);
ret = hisi_qm_restart(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Failed to start QM!\n"); dev_err(&pdev->dev, "Failed to start QM!\n");
return ret; return ret;
} }
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(&pdev->dev, "Start qp%d failed\n", i);
return ret;
}
}
}
if (hpre->ctrl->num_vfs) if (hpre->ctrl->num_vfs)
hpre_vf_q_assign(hpre, hpre->ctrl->num_vfs); hpre_vf_q_assign(hpre, hpre->ctrl->num_vfs);
/* Clear VF MSE bit */ /* Clear VF MSE bit */
hpre_set_mse(hpre, 1); hpre_set_mse(hpre, 1);
ret = hpre_vf_reset_done(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to start VFs!\n");
return -EPERM;
}
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) if (qm->use_uacce)
uacce_reset_done(&qm->uacce); uacce_register(&qm->uacce);
#endif #endif
return 0; return 0;
...@@ -1207,9 +1302,11 @@ static int hpre_controller_reset(struct hpre *hpre) ...@@ -1207,9 +1302,11 @@ static int hpre_controller_reset(struct hpre *hpre)
int ret; int ret;
dev_info(dev, "Controller resetting...\n"); dev_info(dev, "Controller resetting...\n");
ret = hpre_controller_reset_prepare(hpre); ret = hpre_controller_reset_prepare(hpre);
if (ret) if (ret)
return ret; return ret;
ret = hpre_soft_reset(hpre); ret = hpre_soft_reset(hpre);
if (ret) { if (ret) {
dev_err(dev, "Controller reset failed (%d)\n", ret); dev_err(dev, "Controller reset failed (%d)\n", ret);
...@@ -1219,8 +1316,9 @@ static int hpre_controller_reset(struct hpre *hpre) ...@@ -1219,8 +1316,9 @@ static int hpre_controller_reset(struct hpre *hpre)
ret = hpre_controller_reset_done(hpre); ret = hpre_controller_reset_done(hpre);
if (ret) if (ret)
return ret; return ret;
clear_bit(HPRE_RESET, &hpre->status);
dev_info(dev, "Controller reset complete\n"); dev_info(dev, "Controller reset complete\n");
clear_bit(QM_RESET, &hpre->qm.status.flags);
return 0; return 0;
} }
...@@ -1247,25 +1345,87 @@ static pci_ers_result_t hpre_slot_reset(struct pci_dev *pdev) ...@@ -1247,25 +1345,87 @@ static pci_ers_result_t hpre_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED; return PCI_ERS_RESULT_RECOVERED;
} }
#ifdef CONFIG_CRYPTO_QM_UACCE static void hpre_flr_prepare_rdy(struct pci_dev *pdev)
{
struct pci_dev *pf_pdev = pci_physfn(pdev);
struct hpre *hpre = pci_get_drvdata(pf_pdev);
int delay = 1;
u32 flag = 1;
#define TIMEOUT 60000
#define DELAY_INC 2000
while (flag) {
flag = 0;
msleep(delay);
if (delay > TIMEOUT) {
flag = 1;
delay = 1;
dev_err(&pdev->dev, "Device error, please exit FLR!\n");
} else if (test_and_set_bit(HPRE_RESET, &hpre->status))
flag = 1;
delay += DELAY_INC;
}
}
static void hpre_reset_prepare(struct pci_dev *pdev) static void hpre_reset_prepare(struct pci_dev *pdev)
{ {
struct hpre *hpre = pci_get_drvdata(pdev); struct hpre *hpre = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hpre->qm; struct hisi_qm *qm = &hpre->qm;
struct device *dev = &pdev->dev;
int ret;
if (qm->use_uacce) hpre_flr_prepare_rdy(pdev);
uacce_reset_prepare(&qm->uacce);
ret = hpre_vf_reset_prepare(pdev, QM_FLR);
if (ret) {
dev_err(&pdev->dev, "Fails to prepare reset!\n");
return;
}
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n");
return;
}
dev_info(dev, "FLR resetting...\n");
}
static void hpre_flr_reset_complete(struct pci_dev *pdev)
{
struct pci_dev *pf_pdev = pci_physfn(pdev);
struct hpre *hpre = pci_get_drvdata(pf_pdev);
clear_bit(HPRE_RESET, &hpre->status);
} }
static void hpre_reset_done(struct pci_dev *pdev) static void hpre_reset_done(struct pci_dev *pdev)
{ {
struct hpre *hpre = pci_get_drvdata(pdev); struct hpre *hpre = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hpre->qm; struct hisi_qm *qm = &hpre->qm;
struct device *dev = &pdev->dev;
int ret;
if (qm->use_uacce) hisi_qm_clear_queues(qm);
uacce_reset_done(&qm->uacce); ret = hisi_qm_restart(qm);
if (ret) {
dev_err(dev, "Failed to start QM!\n");
return;
}
if (pdev->is_physfn) {
hpre_set_user_domain_and_cache(hpre);
hpre_hw_err_init(hpre);
if (hpre->ctrl->num_vfs)
hpre_vf_q_assign(hpre, hpre->ctrl->num_vfs);
hpre_vf_reset_done(pdev);
}
hpre_flr_reset_complete(pdev);
dev_info(dev, "FLR reset complete\n");
} }
#endif
static void hpre_remove(struct pci_dev *pdev) static void hpre_remove(struct pci_dev *pdev)
{ {
...@@ -1287,7 +1447,7 @@ static void hpre_remove(struct pci_dev *pdev) ...@@ -1287,7 +1447,7 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_cnt_regs_clear(qm); hpre_cnt_regs_clear(qm);
hpre_debugfs_exit(hpre); hpre_debugfs_exit(hpre);
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) if (qm->fun_type == QM_HW_PF)
hpre_hw_error_set_state(hpre, false); hpre_hw_error_set_state(hpre, false);
hisi_qm_uninit(qm); hisi_qm_uninit(qm);
......
...@@ -153,7 +153,7 @@ ...@@ -153,7 +153,7 @@
#define TASK_TIMEOUT 10000 #define TASK_TIMEOUT 10000
#define WAIT_PERIOD 20 #define WAIT_PERIOD 20
#define MAX_WAIT_COUNTS 3 #define MAX_WAIT_COUNTS 1000
#define CURRENT_Q_MASK 0x0000ffff #define CURRENT_Q_MASK 0x0000ffff
...@@ -244,6 +244,88 @@ static const char * const qm_fifo_overflow[] = { ...@@ -244,6 +244,88 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq", "cq", "eq", "aeq",
}; };
static const char * const qm_s[] = {
"init", "start", "close", "stop",
};
static const char * const qp_s[] = {
"none", "init", "start", "stop", "close",
};
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
bool avail = false;
switch (curr) {
case QM_INIT:
if (new == QM_START || new == QM_CLOSE)
avail = true;
break;
case QM_START:
if (new == QM_STOP)
avail = true;
break;
case QM_STOP:
if (new == QM_CLOSE || new == QM_START)
avail = true;
break;
default:
break;
}
dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
qm_s[curr], qm_s[new]);
if (!avail)
dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
qm_s[curr], qm_s[new]);
return avail;
}
static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
enum qp_state new)
{
enum qm_state qm_curr = atomic_read(&qm->status.flags);
enum qp_state qp_curr = 0;
bool avail = false;
if (qp)
qp_curr = atomic_read(&qp->qp_status.flags);
switch (new) {
case QP_INIT:
if (qm_curr == QM_START)
avail = true;
break;
case QP_START:
if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
(qm_curr == QM_START && qp_curr == QP_STOP))
avail = true;
break;
case QP_STOP:
if (qm_curr == QM_START && qp_curr == QP_START)
avail = true;
break;
case QP_CLOSE:
if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
(qm_curr == QM_START && qp_curr == QP_STOP) ||
(qm_curr == QM_STOP && qp_curr == QP_STOP) ||
(qm_curr == QM_STOP && qp_curr == QP_INIT))
avail = true;
break;
default:
break;
}
dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
if (!avail)
dev_warn(&qm->pdev->dev,
"Can not change qp state from %s to %s in QM %s\n",
qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
return avail;
}
/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm) static int qm_wait_mb_ready(struct hisi_qm *qm)
{ {
...@@ -374,14 +456,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm) ...@@ -374,14 +456,7 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{ {
u16 cqn = eqe->dw0 & QM_EQE_CQN_MASK; return qm->qp_array[eqe->dw0 & QM_EQE_CQN_MASK];
struct hisi_qp *qp;
read_lock(&qm->qps_lock);
qp = qm->qp_array[cqn];
read_unlock(&qm->qps_lock);
return qp;
} }
static void qm_sq_head_update(struct hisi_qp *qp) static void qm_sq_head_update(struct hisi_qp *qp)
...@@ -606,7 +681,7 @@ static void qm_init_qp_status(struct hisi_qp *qp) ...@@ -606,7 +681,7 @@ static void qm_init_qp_status(struct hisi_qp *qp)
qp_status->sq_head = 0; qp_status->sq_head = 0;
qp_status->cq_head = 0; qp_status->cq_head = 0;
qp_status->cqc_phase = 1; qp_status->cqc_phase = 1;
qp_status->flags = 0; atomic_set(&qp_status->flags, 0);
} }
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
...@@ -1052,29 +1127,22 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp) ...@@ -1052,29 +1127,22 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size; return qp->sqe + sq_tail * qp->qm->sqe_size;
} }
/** static struct hisi_qp *hisi_qm_create_qp_lockless(struct hisi_qm *qm,
* hisi_qm_create_qp() - Create a queue pair from qm. u8 alg_type)
* @qm: The qm we create a qp from.
* @alg_type: Accelerator specific algorithm type in sqc.
*
* return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
* qp memory fails.
*/
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{ {
struct device *dev = &qm->pdev->dev; struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp; struct hisi_qp *qp;
int qp_id, ret; int qp_id, ret;
if (!qm_qp_avail_state(qm, NULL, QP_INIT))
return ERR_PTR(-EPERM);
qp = kzalloc(sizeof(*qp), GFP_KERNEL); qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) if (!qp)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
write_lock(&qm->qps_lock);
qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num);
if (qp_id >= qm->qp_num) { if (qp_id >= qm->qp_num) {
write_unlock(&qm->qps_lock);
dev_info_ratelimited(&qm->pdev->dev, "QM all queues are busy!\n"); dev_info_ratelimited(&qm->pdev->dev, "QM all queues are busy!\n");
ret = -EBUSY; ret = -EBUSY;
...@@ -1082,9 +1150,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) ...@@ -1082,9 +1150,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
} }
set_bit(qp_id, qm->qp_bitmap); set_bit(qp_id, qm->qp_bitmap);
qm->qp_array[qp_id] = qp; qm->qp_array[qp_id] = qp;
write_unlock(&qm->qps_lock);
qp->qm = qm; qp->qm = qm;
/* allocate qp dma memory, uacce uses dus region for this */ /* allocate qp dma memory, uacce uses dus region for this */
...@@ -1108,18 +1173,36 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) ...@@ -1108,18 +1173,36 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
qp->alg_type = alg_type; qp->alg_type = alg_type;
qp->c_flag = 1; qp->c_flag = 1;
init_completion(&qp->completion); init_completion(&qp->completion);
atomic_set(&qp->qp_status.flags, QP_INIT);
return qp; return qp;
err_clear_bit: err_clear_bit:
write_lock(&qm->qps_lock);
qm->qp_array[qp_id] = NULL; qm->qp_array[qp_id] = NULL;
clear_bit(qp_id, qm->qp_bitmap); clear_bit(qp_id, qm->qp_bitmap);
write_unlock(&qm->qps_lock);
err_free_qp: err_free_qp:
kfree(qp); kfree(qp);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
/**
* hisi_qm_create_qp() - Create a queue pair from qm.
* @qm: The qm we create a qp from.
* @alg_type: Accelerator specific algorithm type in sqc.
*
* return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
* qp memory fails.
*/
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
struct hisi_qp *qp;
down_write(&qm->qps_lock);
qp = hisi_qm_create_qp_lockless(qm, alg_type);
up_write(&qm->qps_lock);
return qp;
}
EXPORT_SYMBOL_GPL(hisi_qm_create_qp); EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
/** /**
...@@ -1134,16 +1217,20 @@ void hisi_qm_release_qp(struct hisi_qp *qp) ...@@ -1134,16 +1217,20 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
struct qm_dma *qdma = &qp->qdma; struct qm_dma *qdma = &qp->qdma;
struct device *dev = &qm->pdev->dev; struct device *dev = &qm->pdev->dev;
down_write(&qm->qps_lock);
if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
up_write(&qm->qps_lock);
return;
}
if (qm->use_dma_api && qdma->va) if (qm->use_dma_api && qdma->va)
dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
write_lock(&qm->qps_lock);
dev_dbg(dev, "release qp %d\n", qp->qp_id); dev_dbg(dev, "release qp %d\n", qp->qp_id);
qm->qp_array[qp->qp_id] = NULL; qm->qp_array[qp->qp_id] = NULL;
clear_bit(qp->qp_id, qm->qp_bitmap); clear_bit(qp->qp_id, qm->qp_bitmap);
write_unlock(&qm->qps_lock);
kfree(qp); kfree(qp);
up_write(&qm->qps_lock);
} }
EXPORT_SYMBOL_GPL(hisi_qm_release_qp); EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
...@@ -1254,15 +1341,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) ...@@ -1254,15 +1341,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
return qm_cq_ctx_cfg(qp, qp_id, pasid); return qm_cq_ctx_cfg(qp, qp_id, pasid);
} }
/** static int hisi_qm_start_qp_lockless(struct hisi_qp *qp, unsigned long arg)
* hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to start to run.
* @arg: Accelerator specific argument.
*
* After this function, qp can receive request from user. Return qp_id if
* successful, Return -EBUSY if failed.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{ {
struct hisi_qm *qm = qp->qm; struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev; struct device *dev = &qm->pdev->dev;
...@@ -1271,6 +1350,8 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) ...@@ -1271,6 +1350,8 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
int pasid = arg; int pasid = arg;
size_t off = 0; size_t off = 0;
int ret; int ret;
if (!qm_qp_avail_state(qm, qp, QP_START))
return -EPERM;
#define QP_INIT_BUF(qp, type, size) do { \ #define QP_INIT_BUF(qp, type, size) do { \
(qp)->type = ((qp)->qdma.va + (off)); \ (qp)->type = ((qp)->qdma.va + (off)); \
...@@ -1302,42 +1383,76 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) ...@@ -1302,42 +1383,76 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
ret = qm_qp_ctx_cfg(qp, qp_id, pasid); ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
if (ret) if (ret)
return ret; return ret;
atomic_set(&qp->qp_status.flags, QP_START);
dev_dbg(dev, "queue %d started\n", qp_id); dev_dbg(dev, "queue %d started\n", qp_id);
return qp_id; return qp_id;
} }
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/** /**
* hisi_qm_stop_qp() - Stop a qp in qm. * hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to stop. * @qp: The qp we want to start to run.
* @arg: Accelerator specific argument.
* *
* This function is reverse of hisi_qm_start_qp. Return 0 if successful. * After this function, qp can receive request from user. Return qp_id if
* successful, Return -EBUSY if failed.
*/ */
int hisi_qm_stop_qp(struct hisi_qp *qp) int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
int ret;
down_write(&qm->qps_lock);
ret = hisi_qm_start_qp_lockless(qp, arg);
up_write(&qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
static int hisi_qm_stop_qp_lockless(struct hisi_qp *qp)
{ {
struct device *dev = &qp->qm->pdev->dev; struct device *dev = &qp->qm->pdev->dev;
int i = 0; int i = 0;
/* it is stopped */ /* it is stopped */
if (test_bit(QP_STOP, &qp->qp_status.flags)) if (atomic_read(&qp->qp_status.flags) == QP_STOP)
return 0; return 0;
if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
return -EPERM;
atomic_set(&qp->qp_status.flags, QP_STOP);
while (atomic_read(&qp->qp_status.used)) { while (atomic_read(&qp->qp_status.used)) {
i++; i++;
msleep(WAIT_PERIOD); msleep(WAIT_PERIOD);
if (i == MAX_WAIT_COUNTS) { if (i == MAX_WAIT_COUNTS) {
dev_info(dev, "Cannot drain out data for stopping, force to stop!\n"); dev_err(dev, "Cannot drain out data for stopping, system may hang up!!!\n");
break; break;
} }
} }
set_bit(QP_STOP, &qp->qp_status.flags);
dev_dbg(dev, "stop queue %u!", qp->qp_id); dev_dbg(dev, "stop queue %u!", qp->qp_id);
return 0; return 0;
} }
/**
* hisi_qm_stop_qp() - Stop a qp in qm.
* @qp: The qp we want to stop.
*
* This function is reverse of hisi_qm_start_qp. Return 0 if successful.
*/
int hisi_qm_stop_qp(struct hisi_qp *qp)
{
int ret;
down_write(&qp->qm->qps_lock);
ret = hisi_qm_stop_qp_lockless(qp);
up_write(&qp->qm->qps_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
/** /**
...@@ -1348,7 +1463,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); ...@@ -1348,7 +1463,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
* This function will return -EBUSY if qp is currently full, and -EAGAIN * This function will return -EBUSY if qp is currently full, and -EAGAIN
* if qp related qm is resetting. * if qp related qm is resetting.
* *
* Note: Do not support concurrent call of this function. * Note: This function may run with qm_irq_thread and ACC reset at same time.
* It has no race with qm_irq_thread. However, during hisi_qp_send, ACC
* reset may happen, we have no lock here considering performance. This
* causes current qm_db sending fail or can not receive sended sqe. QM
* sync/async receive function should handle the error sqe. ACC reset
* done function should clear used sqe to 0.
*/ */
int hisi_qp_send(struct hisi_qp *qp, const void *msg) int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{ {
...@@ -1357,8 +1477,8 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) ...@@ -1357,8 +1477,8 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
void *sqe = qm_get_avail_sqe(qp); void *sqe = qm_get_avail_sqe(qp);
if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags) || if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
test_bit(QM_RESET, &qp->qm->status.flags))) { atomic_read(&qp->qm->status.flags) == QM_STOP)) {
dev_info(&qp->qm->pdev->dev, "QM resetting...\n"); dev_info(&qp->qm->pdev->dev, "QM resetting...\n");
return -EAGAIN; return -EAGAIN;
} }
...@@ -1416,11 +1536,11 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) ...@@ -1416,11 +1536,11 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{ {
int i, ret; int i, ret;
read_lock(&qm->qps_lock); down_read(&qm->qps_lock);
for (i = 0, ret = 0; i < qm->qp_num; i++) for (i = 0, ret = 0; i < qm->qp_num; i++)
if (!qm->qp_array[i]) if (!qm->qp_array[i])
ret++; ret++;
read_unlock(&qm->qps_lock); up_read(&qm->qps_lock);
if (!qm->use_dma_api) if (!qm->use_dma_api)
ret = (ret == qm->qp_num) ? 1 : 0; ret = (ret == qm->qp_num) ? 1 : 0;
...@@ -1440,6 +1560,17 @@ static int hisi_qm_get_available_instances(struct uacce *uacce) ...@@ -1440,6 +1560,17 @@ static int hisi_qm_get_available_instances(struct uacce *uacce)
return hisi_qm_get_free_qp_num(uacce->priv); return hisi_qm_get_free_qp_num(uacce->priv);
} }
static void hisi_qm_send_signals(struct hisi_qm *qm)
{
struct hisi_qp *qp;
int i;
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp && qp->uacce_q)
uacce_send_sig_to_client(qp->uacce_q);
}
}
static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg, static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
struct uacce_queue **q) struct uacce_queue **q)
{ {
...@@ -1448,12 +1579,16 @@ static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg, ...@@ -1448,12 +1579,16 @@ static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
struct uacce_queue *wd_q; struct uacce_queue *wd_q;
u8 alg_type = 0; u8 alg_type = 0;
qp = hisi_qm_create_qp(qm, alg_type); down_write(&qm->qps_lock);
if (IS_ERR(qp)) qp = hisi_qm_create_qp_lockless(qm, alg_type);
if (IS_ERR(qp)) {
up_write(&qm->qps_lock);
return PTR_ERR(qp); return PTR_ERR(qp);
}
wd_q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL); wd_q = kzalloc(sizeof(struct uacce_queue), GFP_KERNEL);
if (!wd_q) { if (!wd_q) {
up_write(&qm->qps_lock);
hisi_qm_release_qp(qp); hisi_qm_release_qp(qp);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1464,7 +1599,9 @@ static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg, ...@@ -1464,7 +1599,9 @@ static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
qp->uacce_q = wd_q; qp->uacce_q = wd_q;
qp->event_cb = qm_qp_event_notifier; qp->event_cb = qm_qp_event_notifier;
qp->pasid = arg; qp->pasid = arg;
init_waitqueue_head(&wd_q->wait);
up_write(&qm->qps_lock);
return 0; return 0;
} }
...@@ -1484,7 +1621,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q) ...@@ -1484,7 +1621,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
/* need to stop hardware, but can not support in v1 */ /* need to stop hardware, but can not support in v1 */
hisi_qm_release_qp(qp); hisi_qm_release_qp(qp);
kfree(q);
} }
/* map sq/cq/doorbell to user space */ /* map sq/cq/doorbell to user space */
...@@ -1573,7 +1709,7 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q) ...@@ -1573,7 +1709,7 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
ret = hisi_qm_start_qp(qp, qp->pasid); ret = hisi_qm_start_qp(qp, qp->pasid);
if (ret && !qm->use_dma_api) if (ret && !qm->use_dma_api)
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
return ret; return ret;
} }
...@@ -1586,7 +1722,7 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) ...@@ -1586,7 +1722,7 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
hisi_qm_stop_qp(qp); hisi_qm_stop_qp(qp);
if (!qm->use_dma_api) { if (!qm->use_dma_api) {
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
/* /*
* In uacce_mode=1, we flush qm sqc here. * In uacce_mode=1, we flush qm sqc here.
* In uacce_fops_release, the working flow is stop_queue -> * In uacce_fops_release, the working flow is stop_queue ->
...@@ -1602,9 +1738,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type) ...@@ -1602,9 +1738,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type)
struct hisi_qm *qm = q->uacce->priv; struct hisi_qm *qm = q->uacce->priv;
struct hisi_qp *qp = q->priv; struct hisi_qp *qp = q->priv;
write_lock(&qm->qps_lock); down_write(&qm->qps_lock);
qp->alg_type = type; qp->alg_type = type;
write_unlock(&qm->qps_lock); up_write(&qm->qps_lock);
return 0; return 0;
} }
...@@ -1773,7 +1909,8 @@ int hisi_qm_init(struct hisi_qm *qm) ...@@ -1773,7 +1909,8 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_free_irq_vectors; goto err_free_irq_vectors;
mutex_init(&qm->mailbox_lock); mutex_init(&qm->mailbox_lock);
rwlock_init(&qm->qps_lock); init_rwsem(&qm->qps_lock);
atomic_set(&qm->status.flags, QM_INIT);
dev_dbg(dev, "init qm %s with %s\n", dev_dbg(dev, "init qm %s with %s\n",
pdev->is_physfn ? "pf" : "vf", pdev->is_physfn ? "pf" : "vf",
...@@ -1805,6 +1942,11 @@ void hisi_qm_uninit(struct hisi_qm *qm) ...@@ -1805,6 +1942,11 @@ void hisi_qm_uninit(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
down_write(&qm->qps_lock);
if (!qm_avail_state(qm, QM_CLOSE)) {
up_write(&qm->qps_lock);
return;
}
/* qm hardware buffer free on put_queue if no dma api */ /* qm hardware buffer free on put_queue if no dma api */
if (qm->use_dma_api && qm->qdma.va) { if (qm->use_dma_api && qm->qdma.va) {
hisi_qm_cache_wb(qm); hisi_qm_cache_wb(qm);
...@@ -1823,6 +1965,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) ...@@ -1823,6 +1965,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
if (qm->use_uacce) if (qm->use_uacce)
uacce_unregister(&qm->uacce); uacce_unregister(&qm->uacce);
#endif #endif
up_write(&qm->qps_lock);
} }
EXPORT_SYMBOL_GPL(hisi_qm_uninit); EXPORT_SYMBOL_GPL(hisi_qm_uninit);
...@@ -1837,7 +1980,7 @@ int hisi_qm_frozen(struct hisi_qm *qm) ...@@ -1837,7 +1980,7 @@ int hisi_qm_frozen(struct hisi_qm *qm)
{ {
int ret, i; int ret, i;
write_lock(&qm->qps_lock); down_write(&qm->qps_lock);
for (i = 0, ret = 0; i < qm->qp_num; i++) for (i = 0, ret = 0; i < qm->qp_num; i++)
if (!qm->qp_array[i]) if (!qm->qp_array[i])
ret++; ret++;
...@@ -1845,10 +1988,10 @@ int hisi_qm_frozen(struct hisi_qm *qm) ...@@ -1845,10 +1988,10 @@ int hisi_qm_frozen(struct hisi_qm *qm)
if (ret == qm->qp_num) { if (ret == qm->qp_num) {
bitmap_set(qm->qp_bitmap, 0, qm->qp_num); bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
} else { } else {
write_unlock(&qm->qps_lock); up_write(&qm->qps_lock);
return -EBUSY; return -EBUSY;
} }
write_unlock(&qm->qps_lock); up_write(&qm->qps_lock);
return 0; return 0;
} }
...@@ -2072,6 +2215,39 @@ static int __hisi_qm_start(struct hisi_qm *qm) ...@@ -2072,6 +2215,39 @@ static int __hisi_qm_start(struct hisi_qm *qm)
return 0; return 0;
} }
/* restart stopped qm and qps in reset flow */
int hisi_qm_restart(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
int ret, i;
ret = hisi_qm_start(qm);
if (ret < 0)
return ret;
down_write(&qm->qps_lock);
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp && atomic_read(&qp->qp_status.flags) == QP_STOP &&
qp->is_resetting == true) {
ret = hisi_qm_start_qp_lockless(qp, 0);
if (ret < 0) {
dev_err(dev, "Failed to start qp%d!\n", i);
up_write(&qm->qps_lock);
return ret;
}
qp->is_resetting = false;
}
}
up_write(&qm->qps_lock);
return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_restart);
/** /**
* hisi_qm_start() - start qm * hisi_qm_start() - start qm
* @qm: The qm to be started. * @qm: The qm to be started.
...@@ -2087,7 +2263,17 @@ int hisi_qm_start(struct hisi_qm *qm) ...@@ -2087,7 +2263,17 @@ int hisi_qm_start(struct hisi_qm *qm)
unsigned long dus_page_nr = 0; unsigned long dus_page_nr = 0;
unsigned long dko_page_nr = 0; unsigned long dko_page_nr = 0;
unsigned long mmio_page_nr; unsigned long mmio_page_nr;
#endif
int ret = 0;
down_write(&qm->qps_lock);
if (!qm_avail_state(qm, QM_START)) {
up_write(&qm->qps_lock);
return -EPERM;
}
#ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) { if (qm->use_uacce) {
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
sizeof(struct cqe) * QM_Q_DEPTH) >> PAGE_SHIFT; sizeof(struct cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
...@@ -2105,7 +2291,8 @@ int hisi_qm_start(struct hisi_qm *qm) ...@@ -2105,7 +2291,8 @@ int hisi_qm_start(struct hisi_qm *qm)
if (!qm->qp_num) { if (!qm->qp_num) {
dev_err(dev, "qp_num should not be 0\n"); dev_err(dev, "qp_num should not be 0\n");
return -EINVAL; ret = -EINVAL;
goto err_unlock;
} }
/* reset qfr definition */ /* reset qfr definition */
...@@ -2138,8 +2325,10 @@ int hisi_qm_start(struct hisi_qm *qm) ...@@ -2138,8 +2325,10 @@ int hisi_qm_start(struct hisi_qm *qm)
qm->qp_array = devm_kcalloc(dev, qm->qp_num, qm->qp_array = devm_kcalloc(dev, qm->qp_num,
sizeof(struct hisi_qp *), sizeof(struct hisi_qp *),
GFP_KERNEL); GFP_KERNEL);
if (!qm->qp_bitmap || !qm->qp_array) if (!qm->qp_bitmap || !qm->qp_array) {
return -ENOMEM; ret = -ENOMEM;
goto err_unlock;
}
} }
if (!qm->use_dma_api) { if (!qm->use_dma_api) {
...@@ -2148,6 +2337,7 @@ int hisi_qm_start(struct hisi_qm *qm) ...@@ -2148,6 +2337,7 @@ int hisi_qm_start(struct hisi_qm *qm)
* mapped * mapped
*/ */
dev_dbg(&qm->pdev->dev, "qm delay start\n"); dev_dbg(&qm->pdev->dev, "qm delay start\n");
up_write(&qm->qps_lock);
return 0; return 0;
} else if (!qm->qdma.va) { } else if (!qm->qdma.va) {
qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
...@@ -2159,14 +2349,44 @@ int hisi_qm_start(struct hisi_qm *qm) ...@@ -2159,14 +2349,44 @@ int hisi_qm_start(struct hisi_qm *qm)
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
dev_dbg(dev, "allocate qm dma buf(va=%p, dma=%pad, size=%lx)\n", dev_dbg(dev, "allocate qm dma buf(va=%p, dma=%pad, size=%lx)\n",
qm->qdma.va, &qm->qdma.dma, qm->qdma.size); qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
if (!qm->qdma.va) if (!qm->qdma.va) {
return -ENOMEM; ret = -ENOMEM;
goto err_unlock;
}
} }
return __hisi_qm_start(qm); ret = __hisi_qm_start(qm);
if (!ret)
atomic_set(&qm->status.flags, QM_START);
err_unlock:
up_write(&qm->qps_lock);
return ret;
} }
EXPORT_SYMBOL_GPL(hisi_qm_start); EXPORT_SYMBOL_GPL(hisi_qm_start);
/* Stop started qps in reset flow */
static int qm_stop_started_qp(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp;
int i, ret;
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
ret = hisi_qm_stop_qp_lockless(qp);
if (ret < 0) {
dev_err(dev, "Failed to stop qp%d!\n", i);
return ret;
}
qp->is_resetting = true;
}
}
return 0;
}
/** /**
* hisi_qm_stop() - Stop a qm. * hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped. * @qm: The qm which will be stopped.
...@@ -2175,46 +2395,50 @@ EXPORT_SYMBOL_GPL(hisi_qm_start); ...@@ -2175,46 +2395,50 @@ EXPORT_SYMBOL_GPL(hisi_qm_start);
* Related resources are not released at this state, we can use hisi_qm_start * Related resources are not released at this state, we can use hisi_qm_start
* to let qm start again. * to let qm start again.
*/ */
int hisi_qm_stop(struct hisi_qm *qm) int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
{ {
struct device *dev; struct device *dev = &qm->pdev->dev;
struct hisi_qp *qp; int ret = 0;
int ret, i;
pr_debug("hisi_qm stop\n"); down_write(&qm->qps_lock);
if (!qm || !qm->pdev) { qm->status.stop_reason = r;
WARN_ON(1);
return -EINVAL; if (!qm_avail_state(qm, QM_STOP)) {
up_write(&qm->qps_lock);
ret = -EPERM;
goto err_unlock;
} }
dev = &qm->pdev->dev; if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_FLR) {
ret = qm_stop_started_qp(qm);
if (ret < 0) {
up_write(&qm->qps_lock);
goto err_unlock;
}
#ifdef CONFIG_CRYPTO_QM_UACCE
hisi_qm_send_signals(qm);
#endif
}
/* Mask eq and aeq irq */ /* Mask eq and aeq irq */
writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
/* Stop all qps belong to this qm */
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_stop_qp(qp);
if (ret < 0) {
dev_err(dev, "Failed to stop qp%d!\n", i);
return -EBUSY;
}
}
}
if (qm->fun_type == QM_HW_PF) { if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, 0, 0); ret = hisi_qm_set_vft(qm, 0, 0, 0);
if (ret) { if (ret) {
dev_err(dev, "Failed to set vft!\n"); dev_err(dev, "Failed to set vft!\n");
return -EBUSY; ret = -EBUSY;
goto err_unlock;
} }
} }
return 0; atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
up_write(&qm->qps_lock);
return ret;
} }
EXPORT_SYMBOL_GPL(hisi_qm_stop); EXPORT_SYMBOL_GPL(hisi_qm_stop);
......
...@@ -87,12 +87,23 @@ ...@@ -87,12 +87,23 @@
#define PCI_BAR_2 2 #define PCI_BAR_2 2
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
QM_FLR,
};
enum qm_state { enum qm_state {
QM_RESET, QM_INIT = 0,
QM_START,
QM_CLOSE,
QM_STOP,
}; };
enum qp_state { enum qp_state {
QP_INIT = 1,
QP_START,
QP_STOP, QP_STOP,
QP_CLOSE,
}; };
enum qm_hw_ver { enum qm_hw_ver {
...@@ -217,7 +228,8 @@ struct hisi_qm_status { ...@@ -217,7 +228,8 @@ struct hisi_qm_status {
bool eqc_phase; bool eqc_phase;
u32 aeq_head; u32 aeq_head;
bool aeqc_phase; bool aeqc_phase;
unsigned long flags; atomic_t flags;
int stop_reason;
}; };
struct hisi_qm { struct hisi_qm {
...@@ -243,7 +255,7 @@ struct hisi_qm { ...@@ -243,7 +255,7 @@ struct hisi_qm {
struct hisi_qm_status status; struct hisi_qm_status status;
rwlock_t qps_lock; struct rw_semaphore qps_lock;
unsigned long *qp_bitmap; unsigned long *qp_bitmap;
struct hisi_qp **qp_array; struct hisi_qp **qp_array;
...@@ -276,7 +288,7 @@ struct hisi_qp_status { ...@@ -276,7 +288,7 @@ struct hisi_qp_status {
u16 sq_head; u16 sq_head;
u16 cq_head; u16 cq_head;
bool cqc_phase; bool cqc_phase;
unsigned long flags; atomic_t flags;
}; };
struct hisi_qp_ops { struct hisi_qp_ops {
...@@ -303,6 +315,7 @@ struct hisi_qp { ...@@ -303,6 +315,7 @@ struct hisi_qp {
void (*event_cb)(struct hisi_qp *qp); void (*event_cb)(struct hisi_qp *qp);
struct hisi_qm *qm; struct hisi_qm *qm;
bool is_resetting;
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
u16 pasid; u16 pasid;
...@@ -314,7 +327,7 @@ int hisi_qm_init(struct hisi_qm *qm); ...@@ -314,7 +327,7 @@ int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_frozen(struct hisi_qm *qm); int hisi_qm_frozen(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm); int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp); int hisi_qm_stop_qp(struct hisi_qp *qp);
...@@ -331,4 +344,5 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, ...@@ -331,4 +344,5 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
int hisi_qm_hw_error_handle(struct hisi_qm *qm); int hisi_qm_hw_error_handle(struct hisi_qm *qm);
void hisi_qm_clear_queues(struct hisi_qm *qm); void hisi_qm_clear_queues(struct hisi_qm *qm);
enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
int hisi_qm_restart(struct hisi_qm *qm);
#endif #endif
...@@ -22,12 +22,58 @@ ...@@ -22,12 +22,58 @@
struct hisi_rde_ctrl; struct hisi_rde_ctrl;
enum hisi_rde_status {
HISI_RDE_RESET,
};
struct hisi_rde { struct hisi_rde {
struct hisi_qm qm; struct hisi_qm qm;
struct list_head list; struct list_head list;
struct hisi_rde_ctrl *ctrl; struct hisi_rde_ctrl *ctrl;
struct work_struct reset_work;
unsigned long status;
}; };
//#define DEBUG
#define RDE_CM_LOAD_ENABLE 1
#define RDE_MPCC_MAX_SRC_NUM 17
#define RDE_FLEXEC_MAX_SRC_NUM 32
#define RDE_MPCC_CMSIZE 2176
#define RDE_FLEXEC_CMSIZE 1024
#define RDE_BUF_TYPE_SHIFT 3
#define SGL_DATA_OFFSET_SHIFT 8
#define RDE_COEF_GF_SHIFT 32
#define RDE_LBA_BLK 8
#define RDE_LBA_DWORD_CNT 5
#define DIF_CHK_GRD_CTRL_SHIFT 4
#define DIF_CHK_REF_CTRL_SHIFT 32
#define DIF_LBA_SHIFT 32
#define DIF_GEN_PAD_CTRL_SHIFT 32
#define DIF_GEN_REF_CTRL_SHIFT 35
#define DIF_GEN_APP_CTRL_SHIFT 38
#define DIF_GEN_VER_CTRL_SHIFT 41
#define DIF_GEN_GRD_CTRL_SHIFT 44
#define DIF_APP_TAG_SHIFT 48
#define DIF_VERSION_SHIFT 56
#define RDE_TASK_DONE_STATUS 0x80
#define RDE_CRC16_IV 0x301004
#define RDE_PRP_PAGE_SIZE 0x30122c
#define RDE_SGL_SGE_OFFSET 0x301228
#define RDE_ALG_TYPE_MSK 0x60
#define RDE_BUF_TYPE_MSK 0x18
#define RDE_MAX_PLATE_NUM 32
#define SRC_ADDR_TABLE_NUM 48
#define DST_ADDR_TABLE_NUM 26
#define SRC_DIF_TABLE_NUM 20
#define DST_DIF_TABLE_NUM 17
#define RDE_STATUS_MSK 0x7f
#define RDE_DONE_MSK 0x1
#define RDE_DONE_SHIFT 7
#define RDE_PER_SRC_COEF_SIZE 32
#define RDE_PER_SRC_COEF_TIMES 4
struct hisi_rde *find_rde_device(int node); struct hisi_rde *find_rde_device(int node);
int hisi_rde_abnormal_fix(struct hisi_qm *qm);
#endif #endif
...@@ -87,11 +87,13 @@ ...@@ -87,11 +87,13 @@
#define HRDE_PF_DEF_Q_BASE 0 #define HRDE_PF_DEF_Q_BASE 0
#define HRDE_RD_INTVRL_US 10 #define HRDE_RD_INTVRL_US 10
#define HRDE_RD_TMOUT_US 1000 #define HRDE_RD_TMOUT_US 1000
#define FORMAT_DECIMAL 10
static const char hisi_rde_name[] = "hisi_rde"; static const char hisi_rde_name[] = "hisi_rde";
static struct dentry *hrde_debugfs_root; static struct dentry *hrde_debugfs_root;
LIST_HEAD(hisi_rde_list); LIST_HEAD(hisi_rde_list);
DEFINE_MUTEX(hisi_rde_list_lock); DEFINE_MUTEX(hisi_rde_list_lock);
static void hisi_rde_ras_proc(struct work_struct *work);
struct hisi_rde *find_rde_device(int node) struct hisi_rde *find_rde_device(int node)
{ {
...@@ -243,12 +245,34 @@ static const struct kernel_param_ops pf_q_num_ops = { ...@@ -243,12 +245,34 @@ static const struct kernel_param_ops pf_q_num_ops = {
.get = param_get_int, .get = param_get_int,
}; };
static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
u32 n;
int ret;
if (!val)
return -EINVAL;
ret = kstrtou32(val, FORMAT_DECIMAL, &n);
if (ret != 0 || n > UACCE_MODE_NOIOMMU)
return -EINVAL;
return param_set_int(val, kp);
}
static const struct kernel_param_ops uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
};
static u32 pf_q_num = HRDE_PF_DEF_Q_NUM; static u32 pf_q_num = HRDE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
static int uacce_mode = UACCE_MODE_NOUACCE; static int uacce_mode = UACCE_MODE_NOUACCE;
module_param(uacce_mode, int, 0444); module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
static const struct pci_device_id hisi_rde_dev_ids[] = { static const struct pci_device_id hisi_rde_dev_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)},
...@@ -288,6 +312,7 @@ static u32 current_qm_read(struct ctrl_debug_file *file) ...@@ -288,6 +312,7 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val) static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{ {
struct hisi_qm *qm = file_to_qm(file); struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
if (val > 0) { if (val > 0) {
pr_err("function id should be smaller than 0.\n"); pr_err("function id should be smaller than 0.\n");
...@@ -297,6 +322,14 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val) ...@@ -297,6 +322,14 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
writel(val, qm->io_base + QM_DFX_MB_CNT_VF); writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
writel(val, qm->io_base + QM_DFX_DB_CNT_VF); writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
tmp = val |
(readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & QM_VF_CNT_MASK);
writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
tmp = val |
(readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & QM_VF_CNT_MASK);
writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
return 0; return 0;
} }
...@@ -614,6 +647,8 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -614,6 +647,8 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM; return -ENOMEM;
pci_set_drvdata(pdev, hisi_rde); pci_set_drvdata(pdev, hisi_rde);
INIT_WORK(&hisi_rde->reset_work, hisi_rde_ras_proc);
qm = &hisi_rde->qm; qm = &hisi_rde->qm;
ret = hisi_rde_qm_pre_init(qm, pdev); ret = hisi_rde_qm_pre_init(qm, pdev);
if (ret) { if (ret) {
...@@ -657,10 +692,10 @@ static void hisi_rde_remove(struct pci_dev *pdev) ...@@ -657,10 +692,10 @@ static void hisi_rde_remove(struct pci_dev *pdev)
struct hisi_rde *hisi_rde = pci_get_drvdata(pdev); struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_rde->qm; struct hisi_qm *qm = &hisi_rde->qm;
hisi_rde_hw_error_set_state(hisi_rde, false);
hisi_rde_remove_from_list(hisi_rde); hisi_rde_remove_from_list(hisi_rde);
hisi_rde_debugfs_exit(hisi_rde); hisi_rde_debugfs_exit(hisi_rde);
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
hisi_rde_hw_error_set_state(hisi_rde, false);
hisi_qm_uninit(qm); hisi_qm_uninit(qm);
} }
...@@ -672,14 +707,14 @@ static void hisi_rde_hw_error_log(struct hisi_rde *hisi_rde, u32 err_sts) ...@@ -672,14 +707,14 @@ static void hisi_rde_hw_error_log(struct hisi_rde *hisi_rde, u32 err_sts)
while (err->msg) { while (err->msg) {
if (err->int_msk & err_sts) { if (err->int_msk & err_sts) {
dev_warn(dev, "%s [error status=0x%x] found\n", dev_err(dev, "%s [error status=0x%x] found\n",
err->msg, err->int_msk); err->msg, err->int_msk);
if (HRDE_ECC_2BIT_ERR & err_sts) { if (HRDE_ECC_2BIT_ERR & err_sts) {
err_val = err_val =
(readl(hisi_rde->qm.io_base + HRDE_ERR_CNT) (readl(hisi_rde->qm.io_base + HRDE_ERR_CNT)
& GENMASK(15, 0)); & GENMASK(15, 0));
dev_warn(dev, "rde ecc 2bit sram num=0x%x\n", dev_err(dev, "rde ecc 2bit sram num=0x%x\n",
err_val); err_val);
} }
} }
...@@ -691,8 +726,6 @@ static pci_ers_result_t hisi_rde_hw_error_handle(struct hisi_rde *hisi_rde) ...@@ -691,8 +726,6 @@ static pci_ers_result_t hisi_rde_hw_error_handle(struct hisi_rde *hisi_rde)
{ {
u32 err_sts; u32 err_sts;
/*msk err interrupts */
/* read err sts */ /* read err sts */
err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS); err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS);
if (err_sts) { if (err_sts) {
...@@ -729,26 +762,60 @@ static pci_ers_result_t hisi_rde_hw_error_process(struct pci_dev *pdev) ...@@ -729,26 +762,60 @@ static pci_ers_result_t hisi_rde_hw_error_process(struct pci_dev *pdev)
return ret; return ret;
} }
static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde)
{
int delay = 1;
u32 flag = 1;
int ret = 0;
#define TIMEOUT_VF 20000
while (flag) {
flag = 0;
if (delay > TIMEOUT_VF) {
ret = -EBUSY;
break;
}
msleep(delay);
delay *= 2;
if (test_and_set_bit(HISI_RDE_RESET, &hisi_rde->status))
flag = 1;
}
return ret;
}
static int hisi_rde_controller_reset_prepare(struct hisi_rde *hisi_rde) static int hisi_rde_controller_reset_prepare(struct hisi_rde *hisi_rde)
{ {
struct hisi_qm *qm = &hisi_rde->qm; struct hisi_qm *qm = &hisi_rde->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
int retry = 0;
int ret; int ret;
ret = hisi_qm_stop(qm); ret = hisi_rde_reset_prepare_rdy(hisi_rde);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Controller reset not ready!\n");
return ret; return ret;
} }
if (test_and_set_bit(QM_RESET, &qm->status.flags)) { ret = hisi_qm_stop(qm, QM_SOFT_RESET);
dev_warn(&pdev->dev, "Failed to set reset flag!"); if (ret) {
return -EPERM; dev_err(&pdev->dev, "Fails to stop QM!\n");
return ret;
} }
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) /* wait 10s for uacce_queue to release */
uacce_reset_prepare(&qm->uacce); while (retry++ < 1000) {
msleep(20);
if (!uacce_unregister(&qm->uacce))
break;
if (retry == 1000)
return -EBUSY;
}
#endif #endif
return 0; return 0;
...@@ -760,9 +827,9 @@ static int hisi_rde_set_peh_msi(struct hisi_rde *hisi_rde, bool set) ...@@ -760,9 +827,9 @@ static int hisi_rde_set_peh_msi(struct hisi_rde *hisi_rde, bool set)
u32 val; u32 val;
if (set) { if (set) {
(void)pci_write_config_dword(pdev, PEH_MSI_MASK_SHIFT, 0); pci_write_config_dword(pdev, PEH_MSI_MASK_SHIFT, 0);
} else { } else {
(void)pci_write_config_dword(pdev, pci_write_config_dword(pdev,
PEH_MSI_MASK_SHIFT, GENMASK(31, 0)); PEH_MSI_MASK_SHIFT, GENMASK(31, 0));
usleep_range(1000, 2000); usleep_range(1000, 2000);
val = readl_relaxed(hisi_rde->qm.io_base + val = readl_relaxed(hisi_rde->qm.io_base +
...@@ -822,35 +889,23 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde) ...@@ -822,35 +889,23 @@ static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde)
{ {
struct hisi_qm *qm = &hisi_rde->qm; struct hisi_qm *qm = &hisi_rde->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
struct hisi_qp *qp; int ret;
int i, ret;
hisi_qm_clear_queues(qm); hisi_qm_clear_queues(qm);
hisi_rde_set_user_domain_and_cache(hisi_rde); hisi_rde_set_user_domain_and_cache(hisi_rde);
hisi_rde_hw_error_init(hisi_rde); hisi_rde_hw_error_init(hisi_rde);
(void)hisi_rde_set_peh_msi(hisi_rde, true); hisi_rde_set_peh_msi(hisi_rde, true);
ret = hisi_qm_start(qm); ret = hisi_qm_restart(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Failed to start QM!\n"); dev_err(&pdev->dev, "Failed to start QM!\n");
return -EPERM; return -EPERM;
} }
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(&pdev->dev, "Start qp%d failed\n", i);
return -EPERM;
}
}
}
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) if (qm->use_uacce)
uacce_reset_done(&qm->uacce); uacce_register(&qm->uacce);
#endif #endif
return 0; return 0;
...@@ -877,54 +932,61 @@ static int hisi_rde_controller_reset(struct hisi_rde *hisi_rde) ...@@ -877,54 +932,61 @@ static int hisi_rde_controller_reset(struct hisi_rde *hisi_rde)
if (ret) if (ret)
return ret; return ret;
clear_bit(HISI_RDE_RESET, &hisi_rde->status);
dev_info(dev, "Controller reset complete\n"); dev_info(dev, "Controller reset complete\n");
clear_bit(QM_RESET, &hisi_rde->qm.status.flags);
return 0; return 0;
} }
static int hisi_rde_ras_proc(struct pci_dev *pdev) static void hisi_rde_ras_proc(struct work_struct *work)
{ {
struct hisi_rde *hisi_rde = pci_get_drvdata(pdev); struct pci_dev *pdev;
struct device *dev = &pdev->dev; struct hisi_rde *hisi_rde;
pci_ers_result_t ret; pci_ers_result_t ret;
int col_ret;
if (!hisi_rde) { hisi_rde = container_of(work, struct hisi_rde, reset_work);
dev_err(dev, "Can't recover rde-error at dev init\n"); if (!hisi_rde)
return -ENODEV; return;
}
pdev = hisi_rde->qm.pdev;
if (!pdev)
return;
ret = hisi_rde_hw_error_process(pdev); ret = hisi_rde_hw_error_process(pdev);
if (ret == PCI_ERS_RESULT_NEED_RESET) { if (ret == PCI_ERS_RESULT_NEED_RESET)
col_ret = hisi_rde_controller_reset(hisi_rde); if (hisi_rde_controller_reset(hisi_rde))
if (col_ret) dev_err(&pdev->dev, "hisi_rde reset fail.\n");
return ret;
}
return 0; return;
} }
static pci_ers_result_t hisi_rde_error_detected(struct pci_dev *pdev, int hisi_rde_abnormal_fix(struct hisi_qm *qm)
pci_channel_state_t state)
{ {
dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state); struct pci_dev *pdev;
if (state == pci_channel_io_perm_failure) struct hisi_rde *hisi_rde;
return PCI_ERS_RESULT_DISCONNECT;
return hisi_rde_ras_proc(pdev); if (!qm)
} return -EINVAL;
static const struct pci_error_handlers hisi_rde_err_handler = { pdev = qm->pdev;
.error_detected = hisi_rde_error_detected, if (!pdev)
}; return -EINVAL;
hisi_rde = pci_get_drvdata(pdev);
if (!hisi_rde) {
dev_err(&pdev->dev, "hisi_rde is NULL.\n");
return -EINVAL;
}
return schedule_work(&hisi_rde->reset_work);
}
static struct pci_driver hisi_rde_pci_driver = { static struct pci_driver hisi_rde_pci_driver = {
.name = "hisi_rde", .name = "hisi_rde",
.id_table = hisi_rde_dev_ids, .id_table = hisi_rde_dev_ids,
.probe = hisi_rde_probe, .probe = hisi_rde_probe,
.remove = hisi_rde_remove, .remove = hisi_rde_remove,
.err_handler = &hisi_rde_err_handler, //todo,just for compile
}; };
static void hisi_rde_register_debugfs(void) static void hisi_rde_register_debugfs(void)
......
...@@ -42,139 +42,5 @@ struct hisi_rde_sqe { ...@@ -42,139 +42,5 @@ struct hisi_rde_sqe {
__u64 dw7; __u64 dw7;
}; };
/** @addtogroup RDE_SET_OPT
* @brief RDE_SET_OPT is set of flag.
* @{
*/
/**
* @brief RDE algorithm types.
*/
enum {
MPCC = 0x00, /*!< EC */
PQ = 0x40, /*!< RAID5/RAID6/FlexEC */
XOR = 0x60, /*!< XOR */
};
/**
* @brief RDE buffer access types.
*/
enum {
PBUF = 0x00, /*!< Direct Access */
SGL = 0x08, /*!< Scatter Gather List */
PRP = 0x10, /*!< Physical Region Page List */
REVD = 0x18, /*!< Reserved */
};
/**
* @brief RDE Memory saving types.
*/
enum {
NO_MEM_SAVE = 0x00, /*!< Non-Memory Saving */
MEM_SAVE = 0x04, /*!< Memory Saving, only support MPCC EC */
};
/**
* @brief RDE opration types.
*/
enum {
GEN = 0x00, /*!< Generate */
VLD = 0x01, /*!< Validate */
UPD = 0x02, /*!< Update */
RCT = 0x03, /*!< Reconstruct */
};
/**
* @}
*/
/** @addtogroup ACC_CTRL
* @brief ACC_CTRL is arguments to acc_set_ctrl().
* @{
*/
/**
* @brief RDE DIF GRD types.
*/
enum {
NO_GRD = 0, /*!< no GRD domain */
GRD = 1, /*!< GRD domain without checking */
GRD_CHECK = 2, /*!< GRD domain with checking */
};
/**
* @brief RDE DIF REF types.
*/
enum {
NO_REF = 0, /*!< no REF domain */
REF = 1, /*!< REF domain without checking */
REF_CHECK_LBA = 2, /*!< REF domain checking with lab */
REF_CHECK_PRI = 3, /*!< REF domain checking with private info */
};
/**
* @}
*/
/** @addtogroup ACC_SDK_API
* @brief ACC_SDK_API is export to users.
* @{
*/
/**
* @brief RDE max numbers of data blocks.
*/
enum {
MAX_DST_NUM = 0x11, /*!< destination blocks */
MAX_SRC_NUM = 0x20, /*!< source blocks */
};
/**
* @brief RDE IO abort switch.
*/
enum {
NO_ABT = 0x0, /*!< don't abort the io */
ABT = 0x1, /*!< abort the io */
};
/**
* @brief RDE coefficient matrix load enable.
*/
enum {
NO_CML = 0x0, /*!< don't load matrix */
CML = 0x1, /*!< load matrix */
};
/**
* @brief RDE coefficient matrix types.
*/
enum {
CM_ENCODE = 0x0, /*!< encode type */
CM_DECODE = 0x1, /*!< decode type */
};
/**
* @brief RDE algorithms block size.
*/
enum {
ABS0 = 0x0, /*!< 512 bytes */
ABS1 = 0x1, /*!< 4K bytes */
};
/**
* @brief RDE crc iv enable.
*/
enum {
NO_CRCIV = 0x0, /*!< default IV is 0 */
CRCIV = 0x1, /*!< IV is register's value */
};
/**
* @brief RDE crc iv switch.
*/
enum {
CRCIV0 = 0x0, /*!< select crc16_iv0 of register */
CRCIV1 = 0x1, /*!< select crc16_iv1 of register */
};
/**
* @brief RDE DIF types.
*/
enum {
NO_RDE_DIF = 0x0, /*!< without DIF */
RDE_DIF = 0x1, /*!< DIF */
};
/**
* @brief RDE page padding types.
*/
enum {
NO_PAD = 0, /*!< without padding */
PRE_PAD = 1, /*!< padding before DIF */
POST_PAD = 2, /*!< padding after DIF */
};
#endif #endif
...@@ -28,12 +28,17 @@ enum sec_endian { ...@@ -28,12 +28,17 @@ enum sec_endian {
struct hisi_sec_ctrl; struct hisi_sec_ctrl;
enum hisi_sec_status {
HISI_SEC_RESET,
};
struct hisi_sec { struct hisi_sec {
struct hisi_qm qm; struct hisi_qm qm;
struct list_head list; struct list_head list;
struct hisi_sec_ctrl *ctrl; struct hisi_sec_ctrl *ctrl;
struct dma_pool *sgl_pool; struct dma_pool *sgl_pool;
int ctx_q_num; int ctx_q_num;
unsigned long status;
}; };
struct hisi_sec *find_sec_device(int node); struct hisi_sec *find_sec_device(int node);
......
...@@ -143,11 +143,7 @@ static int hisi_sec_alloc_req_id(struct hisi_sec_req *req, ...@@ -143,11 +143,7 @@ static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
if (req_id >= ctx->req_limit) { if (req_id >= ctx->req_limit) {
spin_unlock_irqrestore(&qp_ctx->req_lock, flags); spin_unlock_irqrestore(&qp_ctx->req_lock, flags);
dump_data((uint8_t *)qp_ctx->req_bitmap, ctx->req_limit / 8); dump_data((uint8_t *)qp_ctx->req_bitmap, ctx->req_limit / 8);
pr_info("[%s][%d] used[%d]\n", __func__, __LINE__,
atomic_read(&qp_ctx->qp->qp_status.used));
dev_err(ctx->sec_dev, "no free req id\n"); dev_err(ctx->sec_dev, "no free req id\n");
pr_info("[%s][%d] max_thread_cnt[%d]\n", __func__, __LINE__,
ctx->max_thread_cnt);
return -ENOBUFS; return -ENOBUFS;
} }
set_bit(req_id, qp_ctx->req_bitmap); set_bit(req_id, qp_ctx->req_bitmap);
......
...@@ -252,6 +252,7 @@ ...@@ -252,6 +252,7 @@
#define SEC_CHAIN_ABN_WR_LEN 0x318 #define SEC_CHAIN_ABN_WR_LEN 0x318
#define SEC_CHAIN_ABN_LEN 128UL #define SEC_CHAIN_ABN_LEN 128UL
#define FORMAT_DECIMAL 10
static const char hisi_sec_name[] = "hisi_sec"; static const char hisi_sec_name[] = "hisi_sec";
static struct dentry *hsec_debugfs_root; static struct dentry *hsec_debugfs_root;
...@@ -401,12 +402,33 @@ static const struct kernel_param_ops pf_q_num_ops = { ...@@ -401,12 +402,33 @@ static const struct kernel_param_ops pf_q_num_ops = {
.get = param_get_int, .get = param_get_int,
}; };
static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
u32 n;
int ret;
if (!val)
return -EINVAL;
ret = kstrtou32(val, FORMAT_DECIMAL, &n);
if (ret != 0 || n > UACCE_MODE_NOIOMMU)
return -EINVAL;
return param_set_int(val, kp);
}
static const struct kernel_param_ops uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
};
static u32 pf_q_num = HSEC_PF_DEF_Q_NUM; static u32 pf_q_num = HSEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
static int uacce_mode = UACCE_MODE_NOUACCE; static int uacce_mode = UACCE_MODE_NOUACCE;
module_param(uacce_mode, int, 0444); module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 1, 2");
static int enable_sm4_ctr; static int enable_sm4_ctr;
module_param(enable_sm4_ctr, int, 0444); module_param(enable_sm4_ctr, int, 0444);
...@@ -590,6 +612,7 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val) ...@@ -590,6 +612,7 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{ {
struct hisi_qm *qm = file_to_qm(file); struct hisi_qm *qm = file_to_qm(file);
struct hisi_sec_ctrl *ctrl = file->ctrl; struct hisi_sec_ctrl *ctrl = file->ctrl;
u32 tmp;
if (val > ctrl->num_vfs) if (val > ctrl->num_vfs)
return -EINVAL; return -EINVAL;
...@@ -597,6 +620,14 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val) ...@@ -597,6 +620,14 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
writel(val, qm->io_base + QM_DFX_MB_CNT_VF); writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
writel(val, qm->io_base + QM_DFX_DB_CNT_VF); writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
tmp = val |
(readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & QM_VF_CNT_MASK);
writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
tmp = val |
(readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & QM_VF_CNT_MASK);
writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
return 0; return 0;
} }
...@@ -1047,7 +1078,7 @@ static void hisi_sec_remove(struct pci_dev *pdev) ...@@ -1047,7 +1078,7 @@ static void hisi_sec_remove(struct pci_dev *pdev)
hisi_sec_sriov_disable(pdev); hisi_sec_sriov_disable(pdev);
hisi_sec_debugfs_exit(hisi_sec); hisi_sec_debugfs_exit(hisi_sec);
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) { if (qm->fun_type == QM_HW_PF) {
hisi_sec_hw_error_set_state(hisi_sec, false); hisi_sec_hw_error_set_state(hisi_sec, false);
...@@ -1066,17 +1097,17 @@ static void hisi_sec_log_hw_error(struct hisi_sec *hisi_sec, u32 err_sts) ...@@ -1066,17 +1097,17 @@ static void hisi_sec_log_hw_error(struct hisi_sec *hisi_sec, u32 err_sts)
while (err->msg) { while (err->msg) {
if (err->int_msk & err_sts) { if (err->int_msk & err_sts) {
dev_warn(dev, "%s [error status=0x%x] found\n", dev_err(dev, "%s [error status=0x%x] found\n",
err->msg, err->int_msk); err->msg, err->int_msk);
if (HSEC_CORE_INT_STATUS_M_ECC & err_sts) { if (HSEC_CORE_INT_STATUS_M_ECC & err_sts) {
err_val = readl(hisi_sec->qm.io_base + err_val = readl(hisi_sec->qm.io_base +
HSEC_CORE_SRAM_ECC_ERR_INFO); HSEC_CORE_SRAM_ECC_ERR_INFO);
dev_warn(dev, dev_err(dev,
"hisi-sec multi ecc sram num=0x%x\n", "hisi-sec multi ecc sram num=0x%x\n",
((err_val >> SRAM_ECC_ERR_NUM_SHIFT) & ((err_val >> SRAM_ECC_ERR_NUM_SHIFT) &
0xFF)); 0xFF));
dev_warn(dev, dev_err(dev,
"hisi-sec multi ecc sram addr=0x%x\n", "hisi-sec multi ecc sram addr=0x%x\n",
(err_val >> SRAM_ECC_ERR_ADDR_SHIFT)); (err_val >> SRAM_ECC_ERR_ADDR_SHIFT));
} }
...@@ -1139,23 +1170,97 @@ static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev, ...@@ -1139,23 +1170,97 @@ static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev,
return hisi_sec_process_hw_error(pdev); return hisi_sec_process_hw_error(pdev);
} }
static int hisi_sec_reset_prepare_rdy(struct hisi_sec *hisi_sec)
{
int delay = 1;
u32 flag = 1;
int ret = 0;
#define RESET_WAIT_TIMEOUT 20000
while (flag) {
flag = 0;
if (delay > RESET_WAIT_TIMEOUT) {
ret = -EBUSY;
break;
}
msleep(delay);
delay *= 2;
if (test_and_set_bit(HISI_SEC_RESET, &hisi_sec->status))
flag = 1;
}
return ret;
}
static int hisi_sec_vf_reset_prepare(struct pci_dev *pdev,
enum qm_stop_reason stop_reason)
{
struct hisi_sec *hisi_sec;
struct pci_dev *dev;
struct hisi_qm *qm;
int ret = 0;
mutex_lock(&hisi_sec_list_lock);
if (pdev->is_physfn) {
list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
dev = hisi_sec->qm.pdev;
if (dev == pdev)
continue;
if (pci_physfn(dev) == pdev) {
qm = &hisi_sec->qm;
ret = hisi_qm_stop(qm, stop_reason);
if (ret)
goto prepare_fail;
}
}
}
prepare_fail:
mutex_unlock(&hisi_sec_list_lock);
return ret;
}
static int hisi_sec_controller_reset_prepare(struct hisi_sec *hisi_sec) static int hisi_sec_controller_reset_prepare(struct hisi_sec *hisi_sec)
{ {
struct hisi_qm *qm = &hisi_sec->qm; struct hisi_qm *qm = &hisi_sec->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
int retry = 0;
int ret; int ret;
ret = hisi_qm_stop(qm); ret = hisi_sec_reset_prepare_rdy(hisi_sec);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Controller reset not ready!\n");
return ret; return ret;
} }
if (test_and_set_bit(QM_RESET, &qm->status.flags)) { ret = hisi_sec_vf_reset_prepare(pdev, QM_SOFT_RESET);
dev_warn(&pdev->dev, "Failed to set reset flag!"); if (ret) {
return -EPERM; dev_err(&pdev->dev, "Fails to stop VFs!\n");
return ret;
} }
ret = hisi_qm_stop(qm, QM_SOFT_RESET);
if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n");
return ret;
}
#ifdef CONFIG_CRYPTO_QM_UACCE
/* wait 10s for uacce_queue to release */
while (retry++ < 1000) {
msleep(20);
if (!uacce_unregister(&qm->uacce))
break;
if (retry == 1000)
return -EBUSY;
}
#endif
return 0; return 0;
} }
...@@ -1201,8 +1306,7 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec) ...@@ -1201,8 +1306,7 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
/* The reset related sub-control registers are not in PCI BAR */ /* The reset related sub-control registers are not in PCI BAR */
if (ACPI_HANDLE(dev)) { if (ACPI_HANDLE(dev)) {
acpi_status s; acpi_status s;
s = acpi_evaluate_object(ACPI_HANDLE(dev), "SRST", NULL, NULL);
s = acpi_evaluate_object(ACPI_HANDLE(dev), "ZRST", NULL, NULL);
if (ACPI_FAILURE(s)) { if (ACPI_FAILURE(s)) {
dev_err(dev, "Controller reset fails\n"); dev_err(dev, "Controller reset fails\n");
return -EIO; return -EIO;
...@@ -1215,6 +1319,34 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec) ...@@ -1215,6 +1319,34 @@ static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
return 0; return 0;
} }
static int hisi_sec_vf_reset_done(struct pci_dev *pdev)
{
struct hisi_sec *hisi_sec;
struct pci_dev *dev;
struct hisi_qm *qm;
int ret = 0;
mutex_lock(&hisi_sec_list_lock);
list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
dev = hisi_sec->qm.pdev;
if (dev == pdev)
continue;
if (pci_physfn(dev) == pdev) {
qm = &hisi_sec->qm;
hisi_qm_clear_queues(qm);
ret = hisi_qm_restart(qm);
if (ret)
goto reset_fail;
}
}
reset_fail:
mutex_unlock(&hisi_sec_list_lock);
return ret;
}
static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
{ {
struct hisi_qm *qm = &hisi_sec->qm; struct hisi_qm *qm = &hisi_sec->qm;
...@@ -1227,7 +1359,7 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) ...@@ -1227,7 +1359,7 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
hisi_sec_set_user_domain_and_cache(hisi_sec); hisi_sec_set_user_domain_and_cache(hisi_sec);
hisi_sec_hw_error_init(hisi_sec); hisi_sec_hw_error_init(hisi_sec);
ret = hisi_qm_start(qm); ret = hisi_qm_restart(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Failed to start QM!\n"); dev_err(&pdev->dev, "Failed to start QM!\n");
return -EPERM; return -EPERM;
...@@ -1250,6 +1382,17 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec) ...@@ -1250,6 +1382,17 @@ static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
/* Clear VF MSE bit */ /* Clear VF MSE bit */
hisi_sec_set_mse(hisi_sec, 1); hisi_sec_set_mse(hisi_sec, 1);
ret = hisi_sec_vf_reset_done(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to start VFs!\n");
return -EPERM;
}
#ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce)
uacce_register(&qm->uacce);
#endif
return 0; return 0;
} }
...@@ -1274,8 +1417,8 @@ static int hisi_sec_controller_reset(struct hisi_sec *hisi_sec) ...@@ -1274,8 +1417,8 @@ static int hisi_sec_controller_reset(struct hisi_sec *hisi_sec)
if (ret) if (ret)
return ret; return ret;
clear_bit(HISI_SEC_RESET, &hisi_sec->status);
dev_info(dev, "Controller reset complete\n"); dev_info(dev, "Controller reset complete\n");
clear_bit(QM_RESET, &hisi_sec->qm.status.flags);
return 0; return 0;
} }
...@@ -1303,6 +1446,30 @@ static pci_ers_result_t hisi_sec_slot_reset(struct pci_dev *pdev) ...@@ -1303,6 +1446,30 @@ static pci_ers_result_t hisi_sec_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED; return PCI_ERS_RESULT_RECOVERED;
} }
static void hisi_sec_flr_prepare_rdy(struct pci_dev *pdev)
{
struct pci_dev *pf_pdev = pci_physfn(pdev);
struct hisi_sec *hisi_sec = pci_get_drvdata(pf_pdev);
int delay = 1;
u32 flag = 1;
#define FLR_WAIT_TIMEOUT 60000
#define FLR_DELAY_INC 2000
while (flag) {
flag = 0;
msleep(delay);
if (delay > FLR_WAIT_TIMEOUT) {
flag = 1;
delay = 1;
dev_err(&pdev->dev, "Device error, please exit FLR!\n");
} else if (test_and_set_bit(HISI_SEC_RESET, &hisi_sec->status))
flag = 1;
delay += FLR_DELAY_INC;
}
}
static void hisi_sec_reset_prepare(struct pci_dev *pdev) static void hisi_sec_reset_prepare(struct pci_dev *pdev)
{ {
struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
...@@ -1310,56 +1477,56 @@ static void hisi_sec_reset_prepare(struct pci_dev *pdev) ...@@ -1310,56 +1477,56 @@ static void hisi_sec_reset_prepare(struct pci_dev *pdev)
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
int ret; int ret;
ret = hisi_qm_stop(qm); hisi_sec_flr_prepare_rdy(pdev);
ret = hisi_sec_vf_reset_prepare(pdev, QM_FLR);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Fails to prepare reset!\n");
return; return;
} }
if (test_and_set_bit(QM_RESET, &qm->status.flags)) { ret = hisi_qm_stop(qm, QM_FLR);
dev_warn(dev, "Failed to set reset flag!"); if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n");
return; return;
} }
dev_info(dev, "FLR resetting...\n"); dev_info(dev, "FLR resetting...\n");
} }
static void hisi_sec_flr_reset_complete(struct pci_dev *pdev)
{
struct pci_dev *pf_pdev = pci_physfn(pdev);
struct hisi_sec *hisi_sec = pci_get_drvdata(pf_pdev);
clear_bit(HISI_SEC_RESET, &hisi_sec->status);
}
static void hisi_sec_reset_done(struct pci_dev *pdev) static void hisi_sec_reset_done(struct pci_dev *pdev)
{ {
struct hisi_sec *hisi_sec = pci_get_drvdata(pdev); struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_sec->qm; struct hisi_qm *qm = &hisi_sec->qm;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct hisi_qp *qp; int ret;
int i, ret;
if (pdev->is_physfn) { hisi_qm_clear_queues(qm);
hisi_qm_clear_queues(qm); ret = hisi_qm_restart(qm);
if (ret) {
dev_err(dev, "Failed to start QM!\n");
return;
}
if (pdev->is_physfn) {
hisi_sec_set_user_domain_and_cache(hisi_sec); hisi_sec_set_user_domain_and_cache(hisi_sec);
hisi_sec_hw_error_init(hisi_sec); hisi_sec_hw_error_init(hisi_sec);
ret = hisi_qm_start(qm);
if (ret) {
dev_err(dev, "Failed to start QM!\n");
return;
}
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "Start qp%d failed\n", i);
return;
}
}
}
if (hisi_sec->ctrl->num_vfs) if (hisi_sec->ctrl->num_vfs)
hisi_sec_vf_q_assign(hisi_sec, hisi_sec->ctrl->num_vfs); hisi_sec_vf_q_assign(hisi_sec, hisi_sec->ctrl->num_vfs);
dev_info(dev, "FLR reset complete\n"); hisi_sec_vf_reset_done(pdev);
} }
hisi_sec_flr_reset_complete(pdev);
dev_info(dev, "FLR reset complete\n");
} }
static const struct pci_error_handlers hisi_sec_err_handler = { static const struct pci_error_handlers hisi_sec_err_handler = {
......
...@@ -75,6 +75,10 @@ ...@@ -75,6 +75,10 @@
#define HZIP_CORE_INT_STATUS 0x3010AC #define HZIP_CORE_INT_STATUS 0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1) #define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
#define SRAM_ECC_ERR_NUM_SHIFT 16 #define SRAM_ECC_ERR_NUM_SHIFT 16
#define SRAM_ECC_ERR_ADDR_SHIFT 24 #define SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_DISABLE 0x000007FF #define HZIP_CORE_INT_DISABLE 0x000007FF
...@@ -93,7 +97,8 @@ ...@@ -93,7 +97,8 @@
#define HZIP_NUMA_DISTANCE 100 #define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 20 #define HZIP_BUF_SIZE 20
#define FORMAT_DECIMAL 10 #define FORMAT_DECIMAL 10
#define HZIP_REG_RD_INTVRL_US 10
#define HZIP_REG_RD_TMOUT_US 1000
static const char hisi_zip_name[] = "hisi_zip"; static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root; static struct dentry *hzip_debugfs_root;
...@@ -422,6 +427,12 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state) ...@@ -422,6 +427,12 @@ static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
return; return;
} }
/* configure error type */
writel(0x1, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_CE_ENB);
writel(0x0, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_NFE_ENB);
if (state) { if (state) {
/* clear ZIP hw error source if having */ /* clear ZIP hw error source if having */
writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base + writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
...@@ -923,7 +934,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) ...@@ -923,7 +934,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_sriov_disable(pdev); hisi_zip_sriov_disable(pdev);
hisi_zip_debugfs_exit(hisi_zip); hisi_zip_debugfs_exit(hisi_zip);
hisi_qm_stop(qm); hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) if (qm->fun_type == QM_HW_PF)
hisi_zip_hw_error_set_state(hisi_zip, false); hisi_zip_hw_error_set_state(hisi_zip, false);
...@@ -1017,12 +1028,12 @@ static int hisi_zip_reset_prepare_rdy(struct hisi_zip *hisi_zip) ...@@ -1017,12 +1028,12 @@ static int hisi_zip_reset_prepare_rdy(struct hisi_zip *hisi_zip)
u32 flag = 1; u32 flag = 1;
int ret = 0; int ret = 0;
#define TIMEOUT_VF 20000 #define RESET_WAIT_TIMEOUT 20000
while (flag) { while (flag) {
flag = 0; flag = 0;
if (delay > TIMEOUT_VF) { if (delay > RESET_WAIT_TIMEOUT) {
ret = -ENOTTY; ret = -EBUSY;
break; break;
} }
...@@ -1036,7 +1047,8 @@ static int hisi_zip_reset_prepare_rdy(struct hisi_zip *hisi_zip) ...@@ -1036,7 +1047,8 @@ static int hisi_zip_reset_prepare_rdy(struct hisi_zip *hisi_zip)
return ret; return ret;
} }
static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev) static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev,
enum qm_stop_reason stop_reason)
{ {
struct hisi_zip *hisi_zip; struct hisi_zip *hisi_zip;
struct pci_dev *dev; struct pci_dev *dev;
...@@ -1053,8 +1065,7 @@ static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev) ...@@ -1053,8 +1065,7 @@ static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev)
if (pci_physfn(dev) == pdev) { if (pci_physfn(dev) == pdev) {
qm = &hisi_zip->qm; qm = &hisi_zip->qm;
set_bit(QM_RESET, &qm->status.flags); ret = hisi_qm_stop(qm, stop_reason);
ret = hisi_qm_stop(qm);
if (ret) if (ret)
goto prepare_fail; goto prepare_fail;
} }
...@@ -1064,13 +1075,13 @@ static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev) ...@@ -1064,13 +1075,13 @@ static int hisi_zip_vf_reset_prepare(struct pci_dev *pdev)
prepare_fail: prepare_fail:
mutex_unlock(&hisi_zip_list_lock); mutex_unlock(&hisi_zip_list_lock);
return ret; return ret;
} }
static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip) static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
{ {
struct hisi_qm *qm = &hisi_zip->qm; struct hisi_qm *qm = &hisi_zip->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
int retry = 0;
int ret; int ret;
ret = hisi_zip_reset_prepare_rdy(hisi_zip); ret = hisi_zip_reset_prepare_rdy(hisi_zip);
...@@ -1079,22 +1090,28 @@ static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip) ...@@ -1079,22 +1090,28 @@ static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
return ret; return ret;
} }
ret = hisi_zip_vf_reset_prepare(pdev); ret = hisi_zip_vf_reset_prepare(pdev, QM_SOFT_RESET);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop VFs!\n"); dev_err(&pdev->dev, "Fails to stop VFs!\n");
return ret; return ret;
} }
set_bit(QM_RESET, &qm->status.flags); ret = hisi_qm_stop(qm, QM_SOFT_RESET);
ret = hisi_qm_stop(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Fails to stop QM!\n");
return ret; return ret;
} }
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) /* wait 10s for uacce_queue to release */
uacce_reset_prepare(&qm->uacce); while (retry++ < 1000) {
msleep(20);
if (!uacce_unregister(&qm->uacce))
break;
if (retry == 1000)
return -EBUSY;
}
#endif #endif
return 0; return 0;
...@@ -1132,8 +1149,9 @@ static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip) ...@@ -1132,8 +1149,9 @@ static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip)
/* If bus lock, reset chip */ /* If bus lock, reset chip */
ret = readl_relaxed_poll_timeout(hisi_zip->qm.io_base + ret = readl_relaxed_poll_timeout(hisi_zip->qm.io_base +
HZIP_MASTER_TRANS_RETURN, val, HZIP_MASTER_TRANS_RETURN, val,
(val == MASTER_TRANS_RETURN_RW), 10, (val == MASTER_TRANS_RETURN_RW),
1000); HZIP_REG_RD_INTVRL_US,
HZIP_REG_RD_TMOUT_US);
if (ret) { if (ret) {
dev_emerg(dev, "Bus lock! Please reset system.\n"); dev_emerg(dev, "Bus lock! Please reset system.\n");
return ret; return ret;
...@@ -1160,10 +1178,8 @@ static int hisi_zip_vf_reset_done(struct pci_dev *pdev) ...@@ -1160,10 +1178,8 @@ static int hisi_zip_vf_reset_done(struct pci_dev *pdev)
{ {
struct hisi_zip *hisi_zip; struct hisi_zip *hisi_zip;
struct pci_dev *dev; struct pci_dev *dev;
struct hisi_qp *qp;
struct hisi_qm *qm; struct hisi_qm *qm;
int ret = 0; int ret = 0;
int i;
mutex_lock(&hisi_zip_list_lock); mutex_lock(&hisi_zip_list_lock);
list_for_each_entry(hisi_zip, &hisi_zip_list, list) { list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
...@@ -1175,20 +1191,9 @@ static int hisi_zip_vf_reset_done(struct pci_dev *pdev) ...@@ -1175,20 +1191,9 @@ static int hisi_zip_vf_reset_done(struct pci_dev *pdev)
qm = &hisi_zip->qm; qm = &hisi_zip->qm;
hisi_qm_clear_queues(qm); hisi_qm_clear_queues(qm);
ret = hisi_qm_start(qm); ret = hisi_qm_restart(qm);
if (ret) if (ret)
goto reset_fail; goto reset_fail;
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0)
goto reset_fail;
}
}
clear_bit(QM_RESET, &qm->status.flags);
} }
} }
...@@ -1201,31 +1206,19 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip) ...@@ -1201,31 +1206,19 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
{ {
struct hisi_qm *qm = &hisi_zip->qm; struct hisi_qm *qm = &hisi_zip->qm;
struct pci_dev *pdev = qm->pdev; struct pci_dev *pdev = qm->pdev;
struct hisi_qp *qp; int ret;
int i, ret;
hisi_qm_clear_queues(qm); hisi_qm_clear_queues(qm);
hisi_zip_set_user_domain_and_cache(hisi_zip); hisi_zip_set_user_domain_and_cache(hisi_zip);
hisi_zip_hw_error_init(hisi_zip); hisi_zip_hw_error_init(hisi_zip);
ret = hisi_qm_start(qm); ret = hisi_qm_restart(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Failed to start QM!\n"); dev_err(&pdev->dev, "Failed to start QM!\n");
return -EPERM; return -EPERM;
} }
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(&pdev->dev, "Start qp%d failed\n", i);
return -EPERM;
}
}
}
if (hisi_zip->ctrl->num_vfs) if (hisi_zip->ctrl->num_vfs)
hisi_zip_vf_q_assign(hisi_zip, hisi_zip->ctrl->num_vfs); hisi_zip_vf_q_assign(hisi_zip, hisi_zip->ctrl->num_vfs);
...@@ -1239,7 +1232,7 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip) ...@@ -1239,7 +1232,7 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
#ifdef CONFIG_CRYPTO_QM_UACCE #ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce) if (qm->use_uacce)
uacce_reset_done(&qm->uacce); uacce_register(&qm->uacce);
#endif #endif
return 0; return 0;
...@@ -1267,7 +1260,6 @@ static int hisi_zip_controller_reset(struct hisi_zip *hisi_zip) ...@@ -1267,7 +1260,6 @@ static int hisi_zip_controller_reset(struct hisi_zip *hisi_zip)
if (ret) if (ret)
return ret; return ret;
clear_bit(QM_RESET, &qm->status.flags);
clear_bit(HISI_ZIP_RESET, &hisi_zip->status); clear_bit(HISI_ZIP_RESET, &hisi_zip->status);
dev_info(dev, "Controller reset complete\n"); dev_info(dev, "Controller reset complete\n");
...@@ -1305,20 +1297,20 @@ static void hisi_zip_flr_prepare_rdy(struct pci_dev *pdev) ...@@ -1305,20 +1297,20 @@ static void hisi_zip_flr_prepare_rdy(struct pci_dev *pdev)
int delay = 1; int delay = 1;
u32 flag = 1; u32 flag = 1;
#define TIMEOUT 60000 #define FLR_WAIT_TIMEOUT 60000
#define DELAY_INC 2000 #define FLR_DELAY_INC 2000
while (flag) { while (flag) {
flag = 0; flag = 0;
msleep(delay); msleep(delay);
if (delay > TIMEOUT) { if (delay > FLR_WAIT_TIMEOUT) {
flag = 1; flag = 1;
delay = 1; delay = 1;
dev_err(&pdev->dev, "Device error, please exit FLR!\n"); dev_err(&pdev->dev, "Device error, please exit FLR!\n");
} else if (test_and_set_bit(HISI_ZIP_RESET, &hisi_zip->status)) } else if (test_and_set_bit(HISI_ZIP_RESET, &hisi_zip->status))
flag = 1; flag = 1;
delay += DELAY_INC; delay += FLR_DELAY_INC;
} }
} }
...@@ -1331,24 +1323,18 @@ static void hisi_zip_reset_prepare(struct pci_dev *pdev) ...@@ -1331,24 +1323,18 @@ static void hisi_zip_reset_prepare(struct pci_dev *pdev)
hisi_zip_flr_prepare_rdy(pdev); hisi_zip_flr_prepare_rdy(pdev);
ret = hisi_zip_vf_reset_prepare(pdev); ret = hisi_zip_vf_reset_prepare(pdev, QM_FLR);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to prepare reset!\n"); dev_err(&pdev->dev, "Fails to prepare reset!\n");
return; return;
} }
set_bit(QM_RESET, &qm->status.flags); ret = hisi_qm_stop(qm, QM_FLR);
ret = hisi_qm_stop(qm);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Fails to stop QM!\n"); dev_err(&pdev->dev, "Fails to stop QM!\n");
return; return;
} }
#ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce)
uacce_reset_prepare(&qm->uacce);
#endif
dev_info(dev, "FLR resetting...\n"); dev_info(dev, "FLR resetting...\n");
} }
...@@ -1365,27 +1351,15 @@ static void hisi_zip_reset_done(struct pci_dev *pdev) ...@@ -1365,27 +1351,15 @@ static void hisi_zip_reset_done(struct pci_dev *pdev)
struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
struct hisi_qm *qm = &hisi_zip->qm; struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
struct hisi_qp *qp; int ret;
int i, ret;
hisi_qm_clear_queues(qm); hisi_qm_clear_queues(qm);
ret = hisi_qm_start(qm); ret = hisi_qm_restart(qm);
if (ret) { if (ret) {
dev_err(dev, "Failed to start QM!\n"); dev_err(dev, "Failed to start QM!\n");
return; return;
} }
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp) {
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "Start qp%d failed\n", i);
return;
}
}
}
if (pdev->is_physfn) { if (pdev->is_physfn) {
hisi_zip_set_user_domain_and_cache(hisi_zip); hisi_zip_set_user_domain_and_cache(hisi_zip);
hisi_zip_hw_error_init(hisi_zip); hisi_zip_hw_error_init(hisi_zip);
...@@ -1395,11 +1369,6 @@ static void hisi_zip_reset_done(struct pci_dev *pdev) ...@@ -1395,11 +1369,6 @@ static void hisi_zip_reset_done(struct pci_dev *pdev)
hisi_zip_vf_reset_done(pdev); hisi_zip_vf_reset_done(pdev);
} }
#ifdef CONFIG_CRYPTO_QM_UACCE
if (qm->use_uacce)
uacce_reset_done(&qm->uacce);
#endif
clear_bit(QM_RESET, &qm->status.flags);
hisi_zip_flr_reset_complete(pdev); hisi_zip_flr_reset_complete(pdev);
dev_info(dev, "FLR reset complete\n"); dev_info(dev, "FLR reset complete\n");
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
static struct class *uacce_class; static struct class *uacce_class;
static DEFINE_IDR(uacce_idr); static DEFINE_IDR(uacce_idr);
static dev_t uacce_devt; static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex); /* mutex to protect uacce */
/* lock to protect all queues management */ /* lock to protect all queues management */
#ifdef CONFIG_UACCE_FIX_MMAP #ifdef CONFIG_UACCE_FIX_MMAP
...@@ -47,7 +46,7 @@ static DEFINE_RWLOCK(uacce_qs_lock); ...@@ -47,7 +46,7 @@ static DEFINE_RWLOCK(uacce_qs_lock);
static const struct file_operations uacce_fops; static const struct file_operations uacce_fops;
static int uacce_fops_fasync(int fd, struct file *file, int mode); static int uacce_fops_fasync(int fd, struct file *file, int mode);
static int uacce_put_queue(struct file *filep); static long uacce_put_queue(struct uacce_queue *q);
/* match with enum uacce_qfrt */ /* match with enum uacce_qfrt */
static const char *const qfrt_str[] = { static const char *const qfrt_str[] = {
...@@ -70,50 +69,29 @@ const char *uacce_qfrt_str(struct uacce_qfile_region *qfr) ...@@ -70,50 +69,29 @@ const char *uacce_qfrt_str(struct uacce_qfile_region *qfr)
EXPORT_SYMBOL_GPL(uacce_qfrt_str); EXPORT_SYMBOL_GPL(uacce_qfrt_str);
/** /**
* uacce_reset_prepare - notify users uacce will be reset and release queues. * uacce_send_sig_to_client - notify users uacce_queue should be released.
* @uacce: the uacce which will be reset. * @q: the uacce_queue which will be stopped.
* *
* This function walks all uacce_queues and sends signal to processes which * This function sends signal to process which is using uacce_queue.
* are using these uacce_queues. Before a hardware which driver is registered *
* to uacce framework resets, this function can be used to send a signal to * Note: This function can be called in low level driver, which may bring a race
* userspace. * with uacce_fops_release. The problem is this function may be called
* when q is NULL. Low level driver should avoid this by locking hardware
* queue pool and check if there is related hardware queue before calling
* this function.
*
* And from view of uacce_queue state, uacce_queue state does not be
* changed. Operation of queue should also be protected by low level
* driver.
*/ */
void uacce_reset_prepare(struct uacce *uacce) void uacce_send_sig_to_client(struct uacce_queue *q)
{ {
struct uacce_queue *q; if (!q)
atomic_set(&uacce->state, UACCE_ST_RST);
mutex_lock(&uacce->q_lock);
if (list_empty(&uacce->qs)) {
mutex_unlock(&uacce->q_lock);
return; return;
}
list_for_each_entry(q, &uacce->qs, q_dev) {
kill_fasync(&q->async_queue, SIGIO, POLL_IN);
}
mutex_unlock(&uacce->q_lock); kill_fasync(&q->async_queue, SIGIO, POLL_IN);
/* make sure above single been handled */
mdelay(UACCE_RESET_DELAY_MS);
} }
EXPORT_SYMBOL_GPL(uacce_reset_prepare); EXPORT_SYMBOL_GPL(uacce_send_sig_to_client);
/**
* uacce_reset_done - Set uacce as normal state.
* @uacce: the uacce which reset is done.
*
* This function set uacce as normal state, after this uacce can be opened
* again.
*/
void uacce_reset_done(struct uacce *uacce)
{
atomic_set(&uacce->state, UACCE_ST_INIT);
}
EXPORT_SYMBOL_GPL(uacce_reset_done);
/** /**
* uacce_wake_up - Wake up the process who is waiting this queue * uacce_wake_up - Wake up the process who is waiting this queue
...@@ -126,6 +104,82 @@ void uacce_wake_up(struct uacce_queue *q) ...@@ -126,6 +104,82 @@ void uacce_wake_up(struct uacce_queue *q)
} }
EXPORT_SYMBOL_GPL(uacce_wake_up); EXPORT_SYMBOL_GPL(uacce_wake_up);
static bool uacce_q_avail_ioctl(struct uacce_queue *q, unsigned int cmd)
{
enum uacce_q_state state = q->state;
bool avail = false;
switch (state) {
case UACCE_Q_INIT:
switch (cmd) {
case UACCE_CMD_SHARE_SVAS:
case UACCE_CMD_GET_SS_DMA:
case UACCE_CMD_PUT_Q:
avail = true;
break;
case UACCE_CMD_START:
if (q->qfrs[UACCE_QFRT_MMIO] &&
q->qfrs[UACCE_QFRT_DUS])
avail = true;
break;
/* acc specific ioctl */
default:
avail = true;
}
break;
case UACCE_Q_STARTED:
switch (cmd) {
case UACCE_CMD_SHARE_SVAS:
case UACCE_CMD_GET_SS_DMA:
case UACCE_CMD_PUT_Q:
avail = true;
break;
case UACCE_CMD_START:
break;
default:
avail = true;
}
break;
case UACCE_Q_ZOMBIE:
break;
default:
break;
}
return avail;
}
static bool uacce_q_avail_mmap(struct uacce_queue *q, unsigned int type)
{
enum uacce_q_state state = q->state;
bool avail = false;
switch (state) {
case UACCE_Q_INIT:
avail = true;
break;
case UACCE_Q_STARTED:
switch (type) {
case UACCE_QFRT_DKO:
/* fix me: ss map should be done before start queue */
case UACCE_QFRT_SS:
avail = true;
break;
case UACCE_QFRT_MMIO:
case UACCE_QFRT_DUS:
default:
break;
}
break;
case UACCE_Q_ZOMBIE:
break;
default:
break;
}
return avail;
}
static inline int uacce_iommu_map_qfr(struct uacce_queue *q, static inline int uacce_iommu_map_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr) struct uacce_qfile_region *qfr)
{ {
...@@ -439,20 +493,17 @@ static long uacce_cmd_share_qfr(struct uacce_queue *tgt, int fd) ...@@ -439,20 +493,17 @@ static long uacce_cmd_share_qfr(struct uacce_queue *tgt, int fd)
dev_dbg(&src->uacce->dev, "share ss with %s\n", dev_dbg(&src->uacce->dev, "share ss with %s\n",
dev_name(&tgt->uacce->dev)); dev_name(&tgt->uacce->dev));
uacce_qs_wlock();
if (!src->qfrs[UACCE_QFRT_SS] || tgt->qfrs[UACCE_QFRT_SS]) if (!src->qfrs[UACCE_QFRT_SS] || tgt->qfrs[UACCE_QFRT_SS])
goto out_with_lock; goto out_with_fd;
ret = uacce_queue_map_qfr(tgt, src->qfrs[UACCE_QFRT_SS]); ret = uacce_queue_map_qfr(tgt, src->qfrs[UACCE_QFRT_SS]);
if (ret) if (ret)
goto out_with_lock; goto out_with_fd;
tgt->qfrs[UACCE_QFRT_SS] = src->qfrs[UACCE_QFRT_SS]; tgt->qfrs[UACCE_QFRT_SS] = src->qfrs[UACCE_QFRT_SS];
list_add(&tgt->list, &src->qfrs[UACCE_QFRT_SS]->qs); list_add(&tgt->list, &src->qfrs[UACCE_QFRT_SS]->qs);
ret = 0; ret = 0;
out_with_lock:
uacce_qs_wunlock();
out_with_fd: out_with_fd:
fput(filep); fput(filep);
return ret; return ret;
...@@ -460,9 +511,9 @@ static long uacce_cmd_share_qfr(struct uacce_queue *tgt, int fd) ...@@ -460,9 +511,9 @@ static long uacce_cmd_share_qfr(struct uacce_queue *tgt, int fd)
static int uacce_start_queue(struct uacce_queue *q) static int uacce_start_queue(struct uacce_queue *q)
{ {
int ret, i, j;
struct uacce_qfile_region *qfr;
struct device *dev = &q->uacce->dev; struct device *dev = &q->uacce->dev;
struct uacce_qfile_region *qfr;
int ret, i, j;
/* /*
* map KMAP qfr to kernel * map KMAP qfr to kernel
...@@ -490,8 +541,9 @@ static int uacce_start_queue(struct uacce_queue *q) ...@@ -490,8 +541,9 @@ static int uacce_start_queue(struct uacce_queue *q)
if (ret < 0) if (ret < 0)
goto err_with_vmap; goto err_with_vmap;
dev_dbg(&q->uacce->dev, "uacce state switch to STARTED\n"); dev_dbg(&q->uacce->dev, "uacce queue state switch to STARTED\n");
atomic_set(&q->uacce->state, UACCE_ST_STARTED); q->state = UACCE_Q_STARTED;
return 0; return 0;
err_with_vmap: err_with_vmap:
...@@ -505,55 +557,78 @@ static int uacce_start_queue(struct uacce_queue *q) ...@@ -505,55 +557,78 @@ static int uacce_start_queue(struct uacce_queue *q)
return ret; return ret;
} }
static long uacce_get_ss_dma(struct uacce_queue *q, void __user *arg) static long uacce_get_ss_dma(struct uacce_queue *q, unsigned long *arg)
{ {
struct uacce *uacce = q->uacce; struct uacce *uacce = q->uacce;
long ret = 0;
unsigned long dma = 0; unsigned long dma = 0;
if (!(uacce->flags & UACCE_DEV_NOIOMMU)) if (!(uacce->flags & UACCE_DEV_NOIOMMU))
return -EINVAL; return -EINVAL;
uacce_qs_wlock();
if (q->qfrs[UACCE_QFRT_SS]) { if (q->qfrs[UACCE_QFRT_SS]) {
dma = (unsigned long)(q->qfrs[UACCE_QFRT_SS]->dma); dma = (unsigned long)(q->qfrs[UACCE_QFRT_SS]->dma);
dev_dbg(&uacce->dev, "%s(%lx)\n", __func__, dma); dev_dbg(&uacce->dev, "%s(%lx)\n", __func__, dma);
} else } else {
ret = -EINVAL; return -EINVAL;
uacce_qs_wunlock(); }
if (copy_to_user(arg, &dma, sizeof(dma))) *arg = dma;
ret = -EFAULT;
return ret; return 0;
} }
static long uacce_fops_unl_ioctl(struct file *filep, static long uacce_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
struct uacce_queue *q = filep->private_data; struct uacce_queue *q;
struct uacce *uacce = q->uacce; struct uacce *uacce;
unsigned long dma = 0;
long ret = 0;
uacce_qs_wlock();
if (unlikely(!filep->private_data)) {
uacce_qs_wunlock();
return -EBADF;
}
q = filep->private_data;
uacce = q->uacce;
if (!uacce_q_avail_ioctl(q, cmd)) {
uacce_qs_wunlock();
return -EINVAL;
}
switch (cmd) { switch (cmd) {
case UACCE_CMD_SHARE_SVAS: case UACCE_CMD_SHARE_SVAS:
return uacce_cmd_share_qfr(q, arg); ret = uacce_cmd_share_qfr(q, arg);
break;
case UACCE_CMD_START: case UACCE_CMD_START:
return uacce_start_queue(q); ret = uacce_start_queue(q);
break;
case UACCE_CMD_GET_SS_DMA: case UACCE_CMD_GET_SS_DMA:
return uacce_get_ss_dma(q, (void __user *)arg); ret = uacce_get_ss_dma(q, &dma);
break;
case UACCE_CMD_PUT_Q: case UACCE_CMD_PUT_Q:
return uacce_put_queue(filep); ret = uacce_put_queue(q);
break;
default: default:
uacce_qs_wunlock();
if (uacce->ops->ioctl) if (uacce->ops->ioctl)
/* This is not protected by uacce_qs_lock */
return uacce->ops->ioctl(q, cmd, arg); return uacce->ops->ioctl(q, cmd, arg);
dev_err(&uacce->dev, "ioctl cmd (%d) is not supported!\n", cmd); dev_err(&uacce->dev, "ioctl cmd (%d) is not supported!\n", cmd);
return -EINVAL; return -EINVAL;
} }
uacce_qs_wunlock();
if (cmd == UACCE_CMD_GET_SS_DMA && !ret)
if (copy_to_user((void __user *)arg, &dma, sizeof(dma)))
ret = -EFAULT;
return ret;
} }
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
...@@ -577,30 +652,24 @@ static int uacce_dev_open_check(struct uacce *uacce) ...@@ -577,30 +652,24 @@ static int uacce_dev_open_check(struct uacce *uacce)
*/ */
if (uacce->flags & (UACCE_DEV_PASID | UACCE_DEV_NOIOMMU)) if (uacce->flags & (UACCE_DEV_PASID | UACCE_DEV_NOIOMMU))
return 0; return 0;
else {
if (!atomic_read(&uacce->ref))
return 0;
if (atomic_cmpxchg(&uacce->state, UACCE_ST_INIT, UACCE_ST_OPENNED) !=
UACCE_ST_INIT) {
dev_info(&uacce->dev, "this device can be openned only once\n"); dev_info(&uacce->dev, "this device can be openned only once\n");
return -EBUSY; return -EBUSY;
} }
dev_dbg(&uacce->dev, "state switch to OPENNED!\n");
return 0;
} }
/* To be fixed: only drain queue relatives */ /* To be fixed: only drain queue relatives */
static int uacce_queue_drain(struct uacce_queue *q) static int uacce_queue_drain(struct uacce_queue *q)
{ {
struct uacce *uacce = q->uacce;
struct uacce_qfile_region *qfr; struct uacce_qfile_region *qfr;
struct uacce *uacce;
int i;
bool is_to_free_region; bool is_to_free_region;
int i;
if (!q) if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
return -EINVAL;
uacce = q->uacce;
if (atomic_read(&uacce->state) == UACCE_ST_STARTED &&
uacce->ops->stop_queue) uacce->ops->stop_queue)
uacce->ops->stop_queue(q); uacce->ops->stop_queue(q);
...@@ -625,40 +694,46 @@ static int uacce_queue_drain(struct uacce_queue *q) ...@@ -625,40 +694,46 @@ static int uacce_queue_drain(struct uacce_queue *q)
if (uacce->flags & UACCE_DEV_SVA) if (uacce->flags & UACCE_DEV_SVA)
iommu_sva_unbind_device(uacce->pdev, q->pasid); iommu_sva_unbind_device(uacce->pdev, q->pasid);
#endif #endif
if (uacce->ops->put_queue) if ((q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED) &&
uacce->ops->put_queue)
uacce->ops->put_queue(q); uacce->ops->put_queue(q);
dev_dbg(&uacce->dev, "uacce state switch to INIT\n"); /*
if (atomic_dec_and_test(&uacce->ref)) * Put_queue above just put hardware queue, but not free uacce_q.
atomic_set(&uacce->state, UACCE_ST_INIT); *
* Put_queue(and stop_queue) is used to support UACCE_PUT_QUEUE
* ioctl, UACCE_PUT_QUEUE is defined only to put low level hardware
* queue, after UACCE_PUT_QUEUE ioctl, uacce_queue enters into zombie
* state. So uacce_queue can only be freed here.
*/
kfree(q);
atomic_dec(&uacce->ref);
module_put(uacce->pdev->driver->owner);
return 0; return 0;
} }
/* While user space releases a queue, all the relatives on the queue /*
* While user space releases a queue, all the relatives on the queue
* should be released imediately by this putting. * should be released imediately by this putting.
*/ */
static int uacce_put_queue(struct file *filep) static long uacce_put_queue(struct uacce_queue *q)
{ {
struct uacce_queue *q = filep->private_data; struct uacce *uacce = q->uacce;
struct uacce *uacce;
if (!q) /*
return 0; * To do: we should vm_munmap mmio and dus regions, currently we munmap
uacce = q->uacce; * mmio and dus region before put queue.
*/
if (UACCE_ST_INIT == if (uacce->ops->stop_queue)
atomic_cmpxchg(&q->status, UACCE_ST_OPENNED, UACCE_ST_INIT)) uacce->ops->stop_queue(q);
return 0;
uacce_fops_fasync(-1, filep, 0); if (uacce->ops->put_queue)
mutex_lock(&uacce->q_lock); uacce->ops->put_queue(q);
list_del(&q->q_dev);
mutex_unlock(&uacce->q_lock); q->state = UACCE_Q_ZOMBIE;
filep->private_data = NULL;
return uacce_queue_drain(q); return 0;
} }
static int uacce_fops_open(struct inode *inode, struct file *filep) static int uacce_fops_open(struct inode *inode, struct file *filep)
...@@ -672,9 +747,6 @@ static int uacce_fops_open(struct inode *inode, struct file *filep) ...@@ -672,9 +747,6 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
if (!uacce) if (!uacce)
return -ENODEV; return -ENODEV;
if (atomic_read(&uacce->state) == UACCE_ST_RST)
return -EINVAL;
if (!uacce->ops->get_queue) if (!uacce->ops->get_queue)
return -EINVAL; return -EINVAL;
...@@ -682,21 +754,23 @@ static int uacce_fops_open(struct inode *inode, struct file *filep) ...@@ -682,21 +754,23 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
return -ENODEV; return -ENODEV;
ret = uacce_dev_open_check(uacce); ret = uacce_dev_open_check(uacce);
if (ret) if (ret)
goto open_err; goto err_open;
#ifdef CONFIG_IOMMU_SVA2 #ifdef CONFIG_IOMMU_SVA2
if (uacce->flags & UACCE_DEV_PASID) { if (uacce->flags & UACCE_DEV_PASID) {
ret = iommu_sva_bind_device(uacce->pdev, current->mm, &pasid, ret = iommu_sva_bind_device(uacce->pdev, current->mm, &pasid,
IOMMU_SVA_FEAT_IOPF, NULL); IOMMU_SVA_FEAT_IOPF, NULL);
if (ret) if (ret)
goto open_err; goto err_open;
} }
#endif #endif
uacce_qs_wlock();
ret = uacce->ops->get_queue(uacce, pasid, &q); ret = uacce->ops->get_queue(uacce, pasid, &q);
if (ret < 0) if (ret < 0) {
goto open_err; uacce_qs_wunlock();
goto err_unbind;
}
atomic_inc(&uacce->ref);
atomic_set(&q->status, UACCE_ST_OPENNED);
q->pasid = pasid; q->pasid = pasid;
q->uacce = uacce; q->uacce = uacce;
q->mm = current->mm; q->mm = current->mm;
...@@ -704,36 +778,51 @@ static int uacce_fops_open(struct inode *inode, struct file *filep) ...@@ -704,36 +778,51 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
INIT_LIST_HEAD(&q->list); INIT_LIST_HEAD(&q->list);
init_waitqueue_head(&q->wait); init_waitqueue_head(&q->wait);
filep->private_data = q; filep->private_data = q;
mutex_lock(&uacce->q_lock); q->state = UACCE_Q_INIT;
list_add(&q->q_dev, &uacce->qs); atomic_inc(&uacce->ref);
mutex_unlock(&uacce->q_lock);
uacce_qs_wunlock();
return 0; return 0;
open_err:
err_unbind:
#ifdef CONFIG_IOMMU_SVA2
if (uacce->flags & UACCE_DEV_PASID)
iommu_sva_unbind_device(uacce->pdev, pasid);
#endif
err_open:
module_put(uacce->pdev->driver->owner); module_put(uacce->pdev->driver->owner);
return ret; return ret;
} }
static int uacce_fops_release(struct inode *inode, struct file *filep) static int uacce_fops_release(struct inode *inode, struct file *filep)
{ {
struct uacce_queue *q = filep->private_data; struct uacce_queue *q;
struct uacce *uacce;
if (!q) int ret = 0;
return 0;
if (UACCE_ST_INIT == uacce_qs_wlock();
atomic_cmpxchg(&q->status, UACCE_ST_OPENNED, UACCE_ST_INIT))
return 0;
uacce_fops_fasync(-1, filep, 0); uacce_fops_fasync(-1, filep, 0);
mutex_lock(&q->uacce->q_lock);
list_del(&q->q_dev);
mutex_unlock(&q->uacce->q_lock);
/* As user space exception(without release queue), it will fall into q = filep->private_data;
* this logic as the task exits to prevent hardware resources leaking if (q) {
*/ uacce = q->uacce;
return uacce_queue_drain(q); /*
* As user space exception(without release queue), it will
* fall into this logic as the task exits to prevent hardware
* resources leaking.
*/
ret = uacce_queue_drain(q);
filep->private_data = NULL;
}
uacce_qs_wunlock();
if (q)
module_put(uacce->pdev->driver->owner);
return ret;
} }
static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce, static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce,
...@@ -808,40 +897,49 @@ static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce, ...@@ -808,40 +897,49 @@ static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce,
static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{ {
struct uacce_queue *q = filep->private_data; struct uacce_queue *q;
struct uacce *uacce = q->uacce; struct uacce *uacce;
enum uacce_qfrt type; enum uacce_qfrt type;
struct uacce_qfile_region *qfr; struct uacce_qfile_region *qfr;
unsigned int flags = 0; unsigned int flags = 0;
int ret; int ret;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
uacce_qs_wlock();
if (unlikely(!filep->private_data)) {
uacce_qs_wunlock();
return -EBADF;
}
q = filep->private_data;
uacce = q->uacce;
type = uacce_get_region_type(uacce, vma); type = uacce_get_region_type(uacce, vma);
if (type == UACCE_QFRT_INVALID)
return -EINVAL;
dev_dbg(&uacce->dev, "mmap q file(t=%s, off=%lx, start=%lx, end=%lx)\n", dev_dbg(&uacce->dev, "mmap q file(t=%s, off=%lx, start=%lx, end=%lx)\n",
qfrt_str[type], vma->vm_pgoff, vma->vm_start, vma->vm_end); qfrt_str[type], vma->vm_pgoff, vma->vm_start, vma->vm_end);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; if (type == UACCE_QFRT_INVALID) {
ret = -EINVAL;
uacce_qs_wlock(); goto out_with_lock;
}
if (q->qfrs[type]) { if (q->qfrs[type]) {
ret = -EBUSY; ret = -EBUSY;
goto out_with_lock; goto out_with_lock;
} }
if (!uacce_q_avail_mmap(q, type)) {
ret = -EINVAL;
goto out_with_lock;
}
switch (type) { switch (type) {
case UACCE_QFRT_MMIO: case UACCE_QFRT_MMIO:
flags = UACCE_QFRF_SELFMT; flags = UACCE_QFRF_SELFMT;
break; break;
case UACCE_QFRT_SS: case UACCE_QFRT_SS:
if (atomic_read(&uacce->state) != UACCE_ST_STARTED) {
ret = -EINVAL;
goto out_with_lock;
}
flags = UACCE_QFRF_MAP | UACCE_QFRF_MMAP; flags = UACCE_QFRF_MAP | UACCE_QFRF_MMAP;
if (uacce->flags & UACCE_DEV_NOIOMMU) if (uacce->flags & UACCE_DEV_NOIOMMU)
...@@ -896,14 +994,27 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) ...@@ -896,14 +994,27 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait) static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{ {
struct uacce_queue *q = file->private_data; struct uacce_queue *q;
struct uacce *uacce = q->uacce; struct uacce *uacce;
int ret = 0;
uacce_qs_wlock();
if (unlikely(!file->private_data)) {
uacce_qs_wunlock();
ret = EPOLLERR;
return ret;
}
q = file->private_data;
uacce = q->uacce;
poll_wait(file, &q->wait, wait); poll_wait(file, &q->wait, wait);
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q)) if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
return EPOLLIN | EPOLLRDNORM; ret = EPOLLIN | EPOLLRDNORM;
return 0; uacce_qs_wunlock();
return ret;
} }
static int uacce_fops_fasync(int fd, struct file *file, int mode) static int uacce_fops_fasync(int fd, struct file *file, int mode)
...@@ -1049,6 +1160,8 @@ static const struct attribute_group *uacce_dev_attr_groups[] = { ...@@ -1049,6 +1160,8 @@ static const struct attribute_group *uacce_dev_attr_groups[] = {
NULL NULL
}; };
static void uacce_dev_release(struct device *dev) {}
static int uacce_create_chrdev(struct uacce *uacce) static int uacce_create_chrdev(struct uacce *uacce)
{ {
int ret; int ret;
...@@ -1065,6 +1178,7 @@ static int uacce_create_chrdev(struct uacce *uacce) ...@@ -1065,6 +1178,7 @@ static int uacce_create_chrdev(struct uacce *uacce)
uacce->dev.class = uacce_class; uacce->dev.class = uacce_class;
uacce->dev.groups = uacce_dev_attr_groups; uacce->dev.groups = uacce_dev_attr_groups;
uacce->dev.parent = uacce->pdev; uacce->dev.parent = uacce->pdev;
uacce->dev.release = uacce_dev_release;
dev_set_name(&uacce->dev, "%s-%d", uacce->drv_name, uacce->dev_id); dev_set_name(&uacce->dev, "%s-%d", uacce->drv_name, uacce->dev_id);
ret = cdev_device_add(&uacce->cdev, &uacce->dev); ret = cdev_device_add(&uacce->cdev, &uacce->dev);
if (ret) if (ret)
...@@ -1081,6 +1195,8 @@ static int uacce_create_chrdev(struct uacce *uacce) ...@@ -1081,6 +1195,8 @@ static int uacce_create_chrdev(struct uacce *uacce)
static void uacce_destroy_chrdev(struct uacce *uacce) static void uacce_destroy_chrdev(struct uacce *uacce)
{ {
cdev_device_del(&uacce->cdev, &uacce->dev); cdev_device_del(&uacce->cdev, &uacce->dev);
put_device(&uacce->dev);
memset(&uacce->dev, 0, sizeof(struct device));
idr_remove(&uacce_idr, uacce->dev_id); idr_remove(&uacce_idr, uacce->dev_id);
} }
...@@ -1233,13 +1349,13 @@ static void uacce_unset_iommu_domain(struct uacce *uacce) ...@@ -1233,13 +1349,13 @@ static void uacce_unset_iommu_domain(struct uacce *uacce)
#endif #endif
/** /**
* uacce_register - register an accelerator * uacce_register - register an accelerator
* @uacce: the accelerator structure * @uacce: the accelerator structure
*/ */
int uacce_register(struct uacce *uacce) int uacce_register(struct uacce *uacce)
{ {
int ret;
struct device *dev = uacce->pdev; struct device *dev = uacce->pdev;
int ret;
if (!dev) { if (!dev) {
pr_err("uacce parent device not set\n"); pr_err("uacce parent device not set\n");
...@@ -1271,12 +1387,10 @@ int uacce_register(struct uacce *uacce) ...@@ -1271,12 +1387,10 @@ int uacce_register(struct uacce *uacce)
return ret; return ret;
#endif #endif
mutex_lock(&uacce_mutex);
ret = uacce_create_chrdev(uacce); ret = uacce_create_chrdev(uacce);
if (ret) { if (ret) {
dev_err(dev, "uacce creates cdev fail!\n"); dev_err(dev, "uacce creates cdev fail!\n");
goto err_with_lock; return ret;
} }
if (uacce->flags & UACCE_DEV_PASID) { if (uacce->flags & UACCE_DEV_PASID) {
...@@ -1286,24 +1400,17 @@ int uacce_register(struct uacce *uacce) ...@@ -1286,24 +1400,17 @@ int uacce_register(struct uacce *uacce)
if (ret) { if (ret) {
dev_err(dev, "uacce sva init fail!\n"); dev_err(dev, "uacce sva init fail!\n");
uacce_destroy_chrdev(uacce); uacce_destroy_chrdev(uacce);
goto err_with_lock; return ret;
} }
#else #else
uacce->flags &= ~(UACCE_DEV_FAULT_FROM_DEV | UACCE_DEV_PASID); uacce->flags &= ~(UACCE_DEV_FAULT_FROM_DEV | UACCE_DEV_PASID);
#endif #endif
} }
dev_dbg(dev, "uacce state initialized to INIT\n"); dev_dbg(&uacce->dev, "register to uacce!\n");
atomic_set(&uacce->state, UACCE_ST_INIT);
atomic_set(&uacce->ref, 0); atomic_set(&uacce->ref, 0);
INIT_LIST_HEAD(&uacce->qs);
mutex_init(&uacce->q_lock);
mutex_unlock(&uacce_mutex);
return 0;
err_with_lock: return 0;
mutex_unlock(&uacce_mutex);
return ret;
} }
EXPORT_SYMBOL_GPL(uacce_register); EXPORT_SYMBOL_GPL(uacce_register);
...@@ -1314,9 +1421,12 @@ EXPORT_SYMBOL_GPL(uacce_register); ...@@ -1314,9 +1421,12 @@ EXPORT_SYMBOL_GPL(uacce_register);
* Unregister an accelerator that wat previously successully registered with * Unregister an accelerator that wat previously successully registered with
* uacce_register(). * uacce_register().
*/ */
void uacce_unregister(struct uacce *uacce) int uacce_unregister(struct uacce *uacce)
{ {
mutex_lock(&uacce_mutex); if (atomic_read(&uacce->ref) > 0) {
printk_ratelimited("Fail to unregister uacce, please close all uacce queues!\n");
return -EAGAIN;
}
#ifdef CONFIG_IOMMU_SVA2 #ifdef CONFIG_IOMMU_SVA2
iommu_sva_shutdown_device(uacce->pdev); iommu_sva_shutdown_device(uacce->pdev);
...@@ -1326,7 +1436,7 @@ void uacce_unregister(struct uacce *uacce) ...@@ -1326,7 +1436,7 @@ void uacce_unregister(struct uacce *uacce)
uacce_destroy_chrdev(uacce); uacce_destroy_chrdev(uacce);
mutex_unlock(&uacce_mutex); return 0;
} }
EXPORT_SYMBOL_GPL(uacce_unregister); EXPORT_SYMBOL_GPL(uacce_unregister);
......
...@@ -61,6 +61,13 @@ struct uacce_ops { ...@@ -61,6 +61,13 @@ struct uacce_ops {
unsigned long arg); unsigned long arg);
}; };
enum uacce_q_state {
UACCE_Q_INIT,
UACCE_Q_STARTED,
UACCE_Q_ZOMBIE,
UACCE_Q_CLOSED,
};
struct uacce_queue { struct uacce_queue {
struct uacce *uacce; struct uacce *uacce;
__u32 flags; __u32 flags;
...@@ -76,14 +83,9 @@ struct uacce_queue { ...@@ -76,14 +83,9 @@ struct uacce_queue {
struct uacce_qfile_region *qfrs[UACCE_QFRT_MAX]; struct uacce_qfile_region *qfrs[UACCE_QFRT_MAX];
struct fasync_struct *async_queue; struct fasync_struct *async_queue;
struct list_head q_dev; enum uacce_q_state state;
}; };
#define UACCE_ST_INIT 0
#define UACCE_ST_OPENNED 1
#define UACCE_ST_STARTED 2
#define UACCE_ST_RST 3
struct uacce { struct uacce {
const char *name; const char *name;
const char *drv_name; const char *drv_name;
...@@ -99,18 +101,14 @@ struct uacce { ...@@ -99,18 +101,14 @@ struct uacce {
struct cdev cdev; struct cdev cdev;
struct device dev; struct device dev;
void *priv; void *priv;
atomic_t state;
atomic_t ref; atomic_t ref;
int prot; int prot;
struct mutex q_lock;
struct list_head qs;
}; };
int uacce_register(struct uacce *uacce); int uacce_register(struct uacce *uacce);
void uacce_unregister(struct uacce *uacce); int uacce_unregister(struct uacce *uacce);
void uacce_wake_up(struct uacce_queue *q); void uacce_wake_up(struct uacce_queue *q);
void uacce_reset_prepare(struct uacce *uacce);
void uacce_reset_done(struct uacce *uacce);
const char *uacce_qfrt_str(struct uacce_qfile_region *qfr); const char *uacce_qfrt_str(struct uacce_qfile_region *qfr);
void uacce_send_sig_to_client(struct uacce_queue *q);
#endif #endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册