Commit d1b93f65 authored by zhangwei, committed by Xie XiuQi

ACC: optimize qm workqueue code to reduce CPU usage

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Feature or Bugfix: Feature
Signed-off-by: Zhangwei <zhangwei375@huawei.com>
Reviewed-by: wangzhou <wangzhou1@hisilicon.com>
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 3bc26c9c
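For orientation before the hunks: previously each hisi_qp carried its own work_struct and the interrupt path queued one work item per completed queue pair, so a busy accelerator scheduled many short-lived work items. After this patch a single work_struct lives in hisi_qm, do_qm_irq() only queues that one item, and qm_work_process() drains the event queue and polls each queue pair inline. The sketch below is a minimal restatement of that pattern for reference only; the my_dev names are illustrative stand-ins, not the driver's real types.

/*
 * Minimal sketch of the reworked deferral pattern: one work item per
 * device instead of one per queue pair (illustrative types and names).
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;	/* optional dedicated workqueue */
	struct work_struct work;	/* single per-device work item */
};

/*
 * Process context: walk the event queue and handle every ready queue
 * pair in one pass, mirroring what qm_work_process() now does.
 */
static void my_dev_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* poll the completed queue pairs of 'dev' here */
	(void)dev;
}

/* Hard IRQ: just kick the single per-device work item and return. */
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	if (dev->wq)
		queue_work(dev->wq, &dev->work);
	else
		schedule_work(&dev->work);

	return IRQ_HANDLED;
}

/* The work item is initialized once per device, not per queue pair. */
static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->work, my_dev_work);
}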
@@ -107,6 +107,7 @@
 #define QM_SQC_VFT_NUM_MASK_v2 0x3ff
 #define QM_DFX_CNT_CLR_CE 0x100118
+#define QM_IN_IDLE_ST_REG 0x1040e4
 #define QM_ABNORMAL_INT_SOURCE 0x100000
 #define QM_ABNORMAL_INT_MASK 0x100004
@@ -212,6 +213,7 @@ struct hisi_qm_hw_ops {
 static const char * const qm_debug_file_name[] = {
 	[CURRENT_Q] = "current_q",
 	[CLEAR_ENABLE] = "clear_enable",
+	[QM_STATE] = "qm_state",
 };
 
 struct hisi_qm_hw_error {
@@ -518,17 +520,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
 	}
 }
 
-static void qp_work_process(struct work_struct *work)
+static void qm_work_process(struct work_struct *work)
 {
-	struct hisi_qp *qp;
-
-	qp = container_of(work, struct hisi_qp, work);
-	qm_poll_qp(qp, qp->qm);
-}
-
-static irqreturn_t do_qm_irq(int irq, void *data)
-{
-	struct hisi_qm *qm = data;
+	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
 	struct hisi_qp *qp;
 	int eqe_num = 0;
@@ -536,12 +530,8 @@ static irqreturn_t do_qm_irq(int irq, void *data)
 	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
 		eqe_num++;
 		qp = qm_to_hisi_qp(qm, eqe);
-		if (qp) {
-			if (qm->wq)
-				queue_work(qm->wq, &qp->work);
-			else
-				schedule_work(&qp->work);
-		}
+		if (qp)
+			qm_poll_qp(qp, qm);
 
 		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
 			qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -559,6 +549,16 @@ static irqreturn_t do_qm_irq(int irq, void *data)
 	}
 
 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
 }
 
+static irqreturn_t do_qm_irq(int irq, void *data)
+{
+	struct hisi_qm *qm = (struct hisi_qm *)data;
+
+	if (qm->wq)
+		queue_work(qm->wq, &qm->work);
+	else
+		schedule_work(&qm->work);
+
+	return IRQ_HANDLED;
+}
@@ -859,6 +859,13 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
 	return 0;
 }
 
+static u32 qm_state_read(struct debugfs_file *file)
+{
+	struct hisi_qm *qm = file_to_qm(file);
+
+	return readl(qm->io_base + QM_IN_IDLE_ST_REG);
+}
+
 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 			     size_t count, loff_t *pos)
 {
@@ -876,6 +883,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf,
 	case CLEAR_ENABLE:
 		val = clear_enable_read(file);
 		break;
+	case QM_STATE:
+		val = qm_state_read(file);
+		break;
 	default:
 		mutex_unlock(&file->lock);
 		return -EINVAL;
@@ -1188,7 +1198,6 @@ static struct hisi_qp *hisi_qm_create_qp_nolock(struct hisi_qm *qm,
 	qp->qp_id = qp_id;
 	qp->alg_type = alg_type;
 	qp->c_flag = 1;
-	INIT_WORK(&qp->work, qp_work_process);
 	init_completion(&qp->completion);
 	atomic_set(&qp->qp_status.flags, QP_INIT);
@@ -1945,6 +1954,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 	mutex_init(&qm->mailbox_lock);
 	init_rwsem(&qm->qps_lock);
 	atomic_set(&qm->status.flags, QM_INIT);
+	INIT_WORK(&qm->work, qm_work_process);
 
 	dev_dbg(dev, "init qm %s with %s\n",
 		pdev->is_physfn ? "pf" : "vf",
......
@@ -120,6 +120,7 @@ enum qm_fun_type {
 enum qm_debug_file {
 	CURRENT_Q,
 	CLEAR_ENABLE,
+	QM_STATE,
 	DEBUG_FILE_NUM,
 };
@@ -282,6 +283,7 @@ struct hisi_qm {
 	dma_addr_t reserve_dma;
 #endif
 	struct workqueue_struct *wq;
+	struct work_struct work;
 	/* design for module not support aer, such as rde */
 	int (*abnormal_fix)(struct hisi_qm *qm);
 };
@@ -325,7 +327,6 @@ struct hisi_qp {
 	u16 pasid;
 	struct uacce_queue *uacce_q;
 #endif
-	struct work_struct work;
 };
 
 int hisi_qm_init(struct hisi_qm *qm);
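A note on the second half of the patch: the new qm_state debugfs entry only reports the raw value of QM_IN_IDLE_ST_REG through the existing qm_debug_read() switch. Below is an illustrative, self-contained version of such a read-only register file; my_qm_dev, my_qm_debugfs_init() and the "my_qm" directory name are hypothetical, only the register offset comes from the diff.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>

#define MY_IN_IDLE_ST_REG 0x1040e4	/* same offset as QM_IN_IDLE_ST_REG */

struct my_qm_dev {
	void __iomem *io_base;
	struct dentry *dbg_dir;
};

/* Read handler: fetch the idle-state register and hand it to user space. */
static ssize_t my_qm_state_read(struct file *filp, char __user *buf,
				size_t count, loff_t *pos)
{
	struct my_qm_dev *dev = filp->private_data;
	char tbuf[32];
	u32 val = readl(dev->io_base + MY_IN_IDLE_ST_REG);
	int len = scnprintf(tbuf, sizeof(tbuf), "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, len);
}

static const struct file_operations my_qm_state_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,	/* exposes 'dev' via filp->private_data */
	.read = my_qm_state_read,
};

/* Register the read-only debugfs file under a per-device directory. */
static void my_qm_debugfs_init(struct my_qm_dev *dev)
{
	dev->dbg_dir = debugfs_create_dir("my_qm", NULL);
	debugfs_create_file("qm_state", 0444, dev->dbg_dir, dev,
			    &my_qm_state_fops);
}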