Commit 2a79a6bc authored by lingmingqiang, committed by Xie XiuQi

UACCE: use mmap flag to handle hw error

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Feature or Bugfix: Feature
Signed-off-by: tanshukun (A) <tanshukun1@huawei.com>
Reviewed-by: xuzaibo <xuzaibo@huawei.com>
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 733c2fd1
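In short, this change drops the SIGIO notification (uacce_send_sig_to_client) and instead adds a "device state" (DS) queue file region: on a hardware reset the driver sets a u32 flag in the DS page (uacce_q_set_hw_reset), and user space learns about the error by mmap()-ing that page and checking the flag. Below is a minimal user-space sketch of how such a flag could be polled; the device node path and the DS page offset are illustrative assumptions and not part of this patch (real code would derive the offset from the driver, e.g. via the qfrs_offset attribute), and the rest of the queue setup is omitted.

/*
 * Illustrative sketch only (not part of this patch): poll the hw-reset
 * flag through the mmap()-ed "device state" (DS) page. The device node
 * path and the DS page offset are assumptions made for the example.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	off_t ds_off = 3 * page;	/* assumed: MMIO + DUS pages precede DS */
	int fd = open("/dev/hisi_zip-0", O_RDWR);	/* hypothetical uacce queue node */
	void *ds;

	if (fd < 0)
		return 1;

	/* The driver backs the DS region with one kernel page and writes a
	 * u32 flag at offset 0 (see uacce_q_set_hw_reset()) on reset. */
	ds = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, ds_off);
	if (ds == MAP_FAILED) {
		close(fd);
		return 1;
	}

	if (*(volatile uint32_t *)ds)
		fprintf(stderr, "device was reset, queue must be re-created\n");

	munmap(ds, page);
	close(fd);
	return 0;
}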
@@ -1137,7 +1136,6 @@ static int hpre_controller_reset_prepare(struct hpre *hpre)
{
struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev;
- int retry = 0;
int ret;
ret = hpre_reset_prepare_rdy(hpre);
@@ -1158,18 +1157,6 @@ static int hpre_controller_reset_prepare(struct hpre *hpre)
return ret;
}
- #ifdef CONFIG_CRYPTO_QM_UACCE
- /* wait 10s for uacce_queue to release */
- while (retry++ < 1000) {
- msleep(20);
- if (!uacce_unregister(&qm->uacce))
- break;
- if (retry == 1000)
- return -EBUSY;
- }
- #endif
return 0;
}
@@ -1288,11 +1275,6 @@ static int hpre_controller_reset_done(struct hpre *hpre)
return -EPERM;
}
- #ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce)
- uacce_register(&qm->uacce);
- #endif
return 0;
}
......
@@ -1578,7 +1578,7 @@ static int hisi_qm_get_available_instances(struct uacce *uacce)
return hisi_qm_get_free_qp_num(uacce->priv);
}
- static void hisi_qm_send_signals(struct hisi_qm *qm)
+ static void hisi_qm_set_hw_reset(struct hisi_qm *qm)
{
struct hisi_qp *qp;
int i;
@@ -1586,9 +1586,10 @@ static void hisi_qm_send_signals(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = qm->qp_array[i];
if (qp && qp->uacce_q)
- uacce_send_sig_to_client(qp->uacce_q);
+ uacce_q_set_hw_reset(qp->uacce_q);
}
}
static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
struct uacce_queue **q)
{
@@ -1790,6 +1791,18 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
return 0;
}
+ static enum uacce_dev_state hisi_qm_get_state(struct uacce *uacce)
+ {
+ struct hisi_qm *qm = uacce->priv;
+ enum qm_state curr;
+ curr = atomic_read(&qm->status.flags);
+ if (curr == QM_STOP)
+ return UACCE_DEV_ERR;
+ else
+ return UACCE_DEV_NORMAL;
+ }
/*
* the device is set the UACCE_DEV_SVA, but it will be cut if SVA patch is not
* available
@@ -1802,6 +1815,7 @@ static struct uacce_ops uacce_qm_ops = {
.stop_queue = hisi_qm_uacce_stop_queue,
.mmap = hisi_qm_uacce_mmap,
.ioctl = hisi_qm_uacce_ioctl,
+ .get_dev_state = hisi_qm_get_state,
};
static int qm_register_uacce(struct hisi_qm *qm)
@@ -2324,16 +2338,21 @@ int hisi_qm_start(struct hisi_qm *qm)
uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
uacce->qf_pg_start[UACCE_QFRT_DKO] = UACCE_QFR_NA;
uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr;
- uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
- dus_page_nr;
+ uacce->qf_pg_start[UACCE_QFRT_DS] = mmio_page_nr +
+ dus_page_nr;
+ uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
+ dus_page_nr + 1;
} else if (qm->use_uacce) {
uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
uacce->qf_pg_start[UACCE_QFRT_DKO] = mmio_page_nr;
uacce->qf_pg_start[UACCE_QFRT_DUS] = mmio_page_nr +
dko_page_nr;
- uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
- dko_page_nr +
- dus_page_nr;
+ uacce->qf_pg_start[UACCE_QFRT_DS] = mmio_page_nr +
+ dko_page_nr +
+ dus_page_nr;
+ uacce->qf_pg_start[UACCE_QFRT_SS] = mmio_page_nr +
+ dko_page_nr +
+ dus_page_nr + 1;
}
#endif
@@ -2436,7 +2455,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
goto err_unlock;
}
#ifdef CONFIG_CRYPTO_QM_UACCE
- hisi_qm_send_signals(qm);
+ hisi_qm_set_hw_reset(qm);
#endif
}
......
@@ -1081,7 +1080,6 @@ static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
{
struct hisi_qm *qm = &hisi_zip->qm;
struct pci_dev *pdev = qm->pdev;
- int retry = 0;
int ret;
ret = hisi_zip_reset_prepare_rdy(hisi_zip);
@@ -1102,18 +1101,6 @@ static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
return ret;
}
- #ifdef CONFIG_CRYPTO_QM_UACCE
- /* wait 10s for uacce_queue to release */
- while (retry++ < 1000) {
- msleep(20);
- if (!uacce_unregister(&qm->uacce))
- break;
- if (retry == 1000)
- return -EBUSY;
- }
- #endif
return 0;
}
@@ -1230,11 +1217,6 @@ static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
return -EPERM;
}
- #ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce)
- uacce_register(&qm->uacce);
- #endif
return 0;
}
......
@@ -53,10 +53,22 @@ static const char *const qfrt_str[] = {
"mmio",
"dko",
"dus",
"ds",
"ss",
"invalid"
};
+ void uacce_q_set_hw_reset(struct uacce_queue *q)
+ {
+ struct uacce_qfile_region *qfr = q->qfrs[UACCE_QFRT_DS];
+ *(u32 *)qfr->kaddr = 1;
+ /* make sure setup is completed */
+ mb();
+ }
+ EXPORT_SYMBOL_GPL(uacce_q_set_hw_reset);
const char *uacce_qfrt_str(struct uacce_qfile_region *qfr)
{
enum uacce_qfrt type = qfr->type;
@@ -68,31 +80,6 @@ const char *uacce_qfrt_str(struct uacce_qfile_region *qfr)
}
EXPORT_SYMBOL_GPL(uacce_qfrt_str);
- /**
- * uacce_send_sig_to_client - notify users uacce_queue should be released.
- * @q: the uacce_queue which will be stopped.
- *
- * This function sends signal to process which is using uacce_queue.
- *
- * Note: This function can be called in low level driver, which may bring a race
- * with uacce_fops_release. The problem is this function may be called
- * when q is NULL. Low level driver should avoid this by locking hardware
- * queue pool and check if there is related hardware queue before calling
- * this function.
- *
- * And from view of uacce_queue state, uacce_queue state does not be
- * changed. Operation of queue should also be protected by low level
- * driver.
- */
- void uacce_send_sig_to_client(struct uacce_queue *q)
- {
- if (!q)
- return;
- kill_fasync(&q->async_queue, SIGIO, POLL_IN);
- }
- EXPORT_SYMBOL_GPL(uacce_send_sig_to_client);
/**
* uacce_wake_up - Wake up the process who is waiting this queue
* @q the accelerator queue to wake up
@@ -856,6 +843,9 @@ static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce,
case UACCE_QFRT_DUS:
break;
+ case UACCE_QFRT_DS:
+ break;
case UACCE_QFRT_SS:
/* todo: this can be valid to protect the process space */
if (uacce->flags & UACCE_DEV_FAULT_FROM_DEV)
@@ -967,6 +957,10 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
flags |= UACCE_QFRF_DMA;
break;
+ case UACCE_QFRT_DS:
+ flags = UACCE_QFRF_KMAP | UACCE_QFRF_MMAP;
+ break;
default:
WARN_ON(&uacce->dev);
break;
@@ -1139,6 +1133,15 @@ static ssize_t uacce_dev_show_qfrs_offset(struct device *dev,
}
static DEVICE_ATTR(qfrs_offset, S_IRUGO, uacce_dev_show_qfrs_offset, NULL);
+ static ssize_t dev_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+ struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
+ return sprintf(buf, "%d\n", uacce->ops->get_dev_state(uacce));
+ }
+ static DEVICE_ATTR_RO(dev_state);
static struct attribute *uacce_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_api.attr,
@@ -1148,6 +1151,7 @@ static struct attribute *uacce_dev_attrs[] = {
&dev_attr_available_instances.attr,
&dev_attr_algorithms.attr,
&dev_attr_qfrs_offset.attr,
+ &dev_attr_dev_state.attr,
NULL,
};
......
@@ -59,6 +59,12 @@ struct uacce_ops {
int (*reset_queue)(struct uacce_queue *q);
long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
unsigned long arg);
+ enum uacce_dev_state (*get_dev_state)(struct uacce *uacce);
};
+ enum uacce_dev_state {
+ UACCE_DEV_ERR = -1,
+ UACCE_DEV_NORMAL,
+ };
enum uacce_q_state {
@@ -109,6 +115,6 @@ int uacce_register(struct uacce *uacce);
int uacce_unregister(struct uacce *uacce);
void uacce_wake_up(struct uacce_queue *q);
const char *uacce_qfrt_str(struct uacce_qfile_region *qfr);
- void uacce_send_sig_to_client(struct uacce_queue *q);
+ void uacce_q_set_hw_reset(struct uacce_queue *q);
#endif
@@ -48,6 +48,7 @@ enum uacce_qfrt {
UACCE_QFRT_MMIO = 0, /* device mmio region */
UACCE_QFRT_DKO, /* device kernel-only */
UACCE_QFRT_DUS, /* device user share */
+ UACCE_QFRT_DS, /* device state */
UACCE_QFRT_SS, /* static share memory */
UACCE_QFRT_MAX,
};
......
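For completeness, the same condition is also visible without mmap through the new read-only dev_state attribute, which reports the value of get_dev_state() (UACCE_DEV_ERR = -1 when the QM is stopped, UACCE_DEV_NORMAL = 0 otherwise). A hedged sketch of reading it from user space follows; the sysfs path is an assumption for the example, since the exact location depends on how the uacce class device exposes its attributes.

/*
 * Illustrative sketch only: read the dev_state attribute added by this
 * patch. The sysfs path below is an assumption, not defined by the patch.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char buf[16] = {0};
	FILE *f = fopen("/sys/class/uacce/hisi_zip-0/attrs/dev_state", "r");	/* assumed path */
	int state;

	if (!f)
		return 1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	state = atoi(buf);	/* -1 == UACCE_DEV_ERR, 0 == UACCE_DEV_NORMAL */
	printf("dev_state = %d (%s)\n", state,
	       state < 0 ? "device error" : "normal");
	return 0;
}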