提交 9151e925 编写于 作者: T tanghui20 提交者: Xie XiuQi

ACC:use %pK format pointer print

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

The format specifiers "%p" and "%llx" can leak kernel addresses.
Use "%pK" instead.
Signed-off-by: tanghui20 <tanghui20@huawei.com>
Reviewed-by: wangzhou <wangzhou1@hisilicon.com>
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 8bf65d55
......@@ -1165,7 +1165,7 @@ static struct hisi_qp *hisi_qm_create_qp_lockless(struct hisi_qm *qm,
goto err_clear_bit;
}
dev_dbg(dev, "allocate qp dma buf(va=%p, dma=%pad, size=%lx)\n",
dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%lx)\n",
qp->qdma.va, &qp->qdma.dma, qp->qdma.size);
}
......@@ -1374,11 +1374,11 @@ static int hisi_qm_start_qp_lockless(struct hisi_qp *qp, unsigned long arg)
QP_INIT_BUF(qp, cqe, sizeof(struct cqe) * QM_Q_DEPTH);
dev_dbg(dev, "init qp buffer(v%d):\n"
" sqe (%lx, %lx)\n"
" cqe (%lx, %lx)\n",
" sqe (%pK, %lx)\n"
" cqe (%pK, %lx)\n",
ver,
(unsigned long)qp->sqe, (unsigned long)qp->sqe_dma,
(unsigned long)qp->cqe, (unsigned long)qp->cqe_dma);
qp->sqe, (unsigned long)qp->sqe_dma,
qp->cqe, (unsigned long)qp->cqe_dma);
ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
if (ret)
......@@ -1695,8 +1695,8 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
qm->qdma.va = q->qfrs[UACCE_QFRT_DKO]->kaddr;
qm->qdma.size = q->qfrs[UACCE_QFRT_DKO]->nr_pages >> PAGE_SHIFT;
dev_dbg(&q->uacce->dev,
"use dko space: va=%lx, dma=%lx, size=%llx\n",
(unsigned long)qm->qdma.va, (unsigned long)qm->qdma.dma,
"use dko space: va=%pK, dma=%lx, size=%llx\n",
qm->qdma.va, (unsigned long)qm->qdma.dma,
qm->size);
ret = __hisi_qm_start(qm);
if (ret)
......@@ -2174,14 +2174,14 @@ static int __hisi_qm_start(struct hisi_qm *qm)
off += PAGE_SIZE;
dev_dbg(dev, "init qm buffer:\n"
" eqe (%lx, %lx)\n"
" aeqe (%lx, %lx)\n"
" sqc (%lx, %lx)\n"
" cqc (%lx, %lx)\n",
(unsigned long)qm->eqe, (unsigned long)qm->eqe_dma,
(unsigned long)qm->aeqe, (unsigned long)qm->aeqe_dma,
(unsigned long)qm->sqc, (unsigned long)qm->sqc_dma,
(unsigned long)qm->cqc, (unsigned long)qm->cqc_dma);
" eqe (%pK, %lx)\n"
" aeqe (%pK, %lx)\n"
" sqc (%pK, %lx)\n"
" cqc (%pK, %lx)\n",
qm->eqe, (unsigned long)qm->eqe_dma,
qm->aeqe, (unsigned long)qm->aeqe_dma,
qm->sqc, (unsigned long)qm->sqc_dma,
qm->cqc, (unsigned long)qm->cqc_dma);
#ifdef CONFIG_CRYPTO_QM_UACCE
/* check if the size exceed the DKO boundary */
......@@ -2347,7 +2347,7 @@ int hisi_qm_start(struct hisi_qm *qm)
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size,
&qm->qdma.dma,
GFP_KERNEL | __GFP_ZERO);
dev_dbg(dev, "allocate qm dma buf(va=%p, dma=%pad, size=%lx)\n",
dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%lx)\n",
qm->qdma.va, &qm->qdma.dma, qm->qdma.size);
if (!qm->qdma.va) {
ret = -ENOMEM;
......
......@@ -116,9 +116,9 @@ static void _queue_work(struct dummy_hw_queue *hwq)
hwq->reg->ring[hwq->tail].tgt_addr,
hwq->reg->ring[hwq->tail].src_addr,
hwq->reg->ring[hwq->tail].size);
dev_dbg(dev, "memcpy(%lx, %lx, %ld) = %d",
(unsigned long)hwq->reg->ring[hwq->tail].tgt_addr,
(unsigned long)hwq->reg->ring[hwq->tail].src_addr,
dev_dbg(dev, "memcpy(%pK, %pK, %ld) = %d",
hwq->reg->ring[hwq->tail].tgt_addr,
hwq->reg->ring[hwq->tail].src_addr,
hwq->reg->ring[hwq->tail].size,
hwq->reg->ring[hwq->tail].ret);
hwq->tail = (hwq->tail+1)%bd_num;
......
......@@ -233,8 +233,8 @@ static int uacce_queue_map_qfr(struct uacce_queue *q,
if (!(qfr->flags & UACCE_QFRF_MAP) || (qfr->flags & UACCE_QFRF_DMA))
return 0;
dev_dbg(&q->uacce->dev, "queue map %s qfr(npage=%d, iova=%lx)\n",
uacce_qfrt_str(qfr), qfr->nr_pages, qfr->iova);
dev_dbg(&q->uacce->dev, "queue map %s qfr(npage=%d, iova=%pK)\n",
uacce_qfrt_str(qfr), qfr->nr_pages, (void *)qfr->iova);
return uacce_iommu_map_qfr(q, qfr);
}
......@@ -245,8 +245,8 @@ static void uacce_queue_unmap_qfr(struct uacce_queue *q,
if (!(qfr->flags & UACCE_QFRF_MAP) || (qfr->flags & UACCE_QFRF_DMA))
return;
dev_dbg(&q->uacce->dev, "queue map %s qfr(npage=%d, iova=%lx)\n",
uacce_qfrt_str(qfr), qfr->nr_pages, qfr->iova);
dev_dbg(&q->uacce->dev, "queue map %s qfr(npage=%d, iova=%pK)\n",
uacce_qfrt_str(qfr), qfr->nr_pages, (void *)qfr->iova);
uacce_iommu_unmap_qfr(q, qfr);
}
......@@ -449,8 +449,8 @@ static void uacce_destroy_region(struct uacce_queue *q,
struct uacce *uacce = q->uacce;
if (qfr->flags & UACCE_QFRF_DMA) {
dev_dbg(uacce->pdev, "free dma qfr %s (kaddr=%lx, dma=%llx)\n",
uacce_qfrt_str(qfr), (unsigned long)qfr->kaddr,
dev_dbg(uacce->pdev, "free dma qfr %s (kaddr=%pK, dma=%llx)\n",
uacce_qfrt_str(qfr), qfr->kaddr,
qfr->dma);
if (current->mm)
vm_munmap((unsigned long)qfr->iova,
......@@ -531,9 +531,9 @@ static int uacce_start_queue(struct uacce_queue *q)
goto err_with_vmap;
}
dev_dbg(dev, "kernel vmap %s qfr(%d pages) to %lx\n",
dev_dbg(dev, "kernel vmap %s qfr(%d pages) to %pK\n",
uacce_qfrt_str(qfr), qfr->nr_pages,
(unsigned long)qfr->kaddr);
qfr->kaddr);
}
}
......@@ -567,7 +567,7 @@ static long uacce_get_ss_dma(struct uacce_queue *q, unsigned long *arg)
if (q->qfrs[UACCE_QFRT_SS]) {
dma = (unsigned long)(q->qfrs[UACCE_QFRT_SS]->dma);
dev_dbg(&uacce->dev, "%s(%lx)\n", __func__, dma);
dev_dbg(&uacce->dev, "%s(%lx).\n", __func__, dma);
} else {
return -EINVAL;
}
......@@ -916,8 +916,9 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
uacce = q->uacce;
type = uacce_get_region_type(uacce, vma);
dev_dbg(&uacce->dev, "mmap q file(t=%s, off=%lx, start=%lx, end=%lx)\n",
qfrt_str[type], vma->vm_pgoff, vma->vm_start, vma->vm_end);
dev_dbg(&uacce->dev, "mmap q file(t=%s, off=%lx, start=%pK, end=%pK)\n",
qfrt_str[type], vma->vm_pgoff,
(void *)vma->vm_start, (void *)vma->vm_end);
if (type == UACCE_QFRT_INVALID) {
ret = -EINVAL;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册