Commit 299b4bc2 authored by lingmingqiang, committed by Xie XiuQi

merge the driver code to hulk branch and format rectification

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

Feature or Bugfix: Bugfix

1. [42feaced] HPRE Crypto warning cleanup
Warning -- Suspicious Truncation in arithmetic expression combining
with pointer
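
The hpre_crypto.c hunk below silences this by hoisting the index
arithmetic into a u64 local, so the checker no longer sees an int-width
expression combined directly with a pointer. A minimal sketch of the
pattern, with names and types taken from the diff:

    unsigned int hlf_ksz = ctx->key_sz >> 1;
    u64 offset;

    /* before: ctx->rsa.crt_prikey + hlf_ksz * _CRT_Q (flagged) */
    offset = hlf_ksz * _CRT_Q;  /* widened before the pointer add */
    ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
                            rsa_key->q, rsa_key->q_sz, hlf_ksz);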

2. [4b3f837d] crypto/zip: Fix to get queue from possible zip functions
Original commit message:
Current code just gets a queue from the closest function and returns
failure if that function has no available queue. In this patch, we
first sort all available functions by distance, then get a queue from
the sorted functions one by one when a closer function has no
available queue.
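
A hedged sketch of that strategy, mirroring the zip_main.c hunk below
(acc_res stands in for the driver's hisi_zip_resource; only the
sorted-insert step is shown):

    /* one wrapper per candidate function, with its NUMA distance */
    struct acc_res {
        struct hisi_zip *hzip;
        int distance;
        struct list_head list;
    };

    /* insert so the list stays sorted by ascending distance */
    static void acc_insert_sorted(struct list_head *head,
                                  struct acc_res *res)
    {
        struct list_head *n = head;
        struct acc_res *tmp;

        list_for_each_entry(tmp, head, list) {
            if (res->distance < tmp->distance) {
                n = &tmp->list;
                break;
            }
        }
        list_add_tail(&res->list, n);
    }

The caller then walks the sorted list and returns the first device for
which hisi_qm_get_free_qp_num() reports a free queue pair, instead of
failing outright when only the closest device is busy.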

3. [7250b1a9] crypto/qm: Export function to get free qp number for acc

4. [86eeda2b] crypto/hisilicon/qm: Fix static check warning
Reduce loop complexity of qm_qp_ctx_cfg function.
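
The qm.c hunk below splits the old monolithic function into
qm_sq_ctx_cfg() and qm_cq_ctx_cfg(), leaving a trivial top level:

    static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
    {
        int ret;

        qm_init_qp_status(qp);

        ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
        if (ret)
            return ret;

        return qm_cq_ctx_cfg(qp, qp_id, pasid);
    }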

5. [f1c558c0] Fix static check warning

6. [dfdfef8f] crypto/hisilicon/qm: Fix QM task timeout bug
There is a domain segment in eqc/aeqc that should be assigned a value
on D06 ES but is reserved on D06 CS.
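
The fix, as the qm.c hunk below shows, is a version-guarded write of
that segment (QM_EQE_AEQE_SIZE is the new macro, 2UL << 12):

    /* the EQE/AEQE size segment exists on D06 ES (QM_HW_V1) only */
    if (qm->ver == QM_HW_V1)
        eqc->dw3 = QM_EQE_AEQE_SIZE;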

7. [4bf721fe] Fix double flush when user processes get two kill signals
When a process gets two kill signals, file->ops->flush is called twice,
so uacce->ops->flush is called twice as well. Currently, flush cannot
be called again on the same uacce_queue file, or a core dump occurs.
So a status field is added to the uacce queue; while the flush and
release operations run, the queue status is checked atomically, and if
the queue has already been released, nothing is done.
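
A minimal sketch of the guard, using the status values from the uacce.c
hunk below (the helper name here is illustrative, not the driver's):

    /* whichever of flush/release runs first wins the cmpxchg and does
     * the real teardown; the loser sees the queue already back in
     * UACCE_ST_INIT and returns without touching it */
    static int uacce_queue_teardown_once(struct uacce_queue *q)
    {
        if (atomic_cmpxchg(&q->status, UACCE_ST_OPENNED,
                           UACCE_ST_INIT) != UACCE_ST_OPENNED)
            return 0;

        return uacce_queue_drain(q);
    }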

8. [20bd4257] uacce/dummy: Fix dummy compile problem
Original commit message:
As we moved flags, api_ver and qf_pg_start from uacce_ops to uacce,
also fix the dummy driver to work with the current uacce.
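
Each driver now sets these fields directly on struct uacce before
registering; from the dummy_wd_dev.c hunk below:

    uacce->api_ver = "dummy_v1";
    uacce->flags = UACCE_DEV_NOIOMMU;
    uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
    uacce->qf_pg_start[UACCE_QFRT_DKO] = UACCE_QFR_NA;
    uacce->qf_pg_start[UACCE_QFRT_DUS] = UACCE_QFR_NA;
    uacce->qf_pg_start[UACCE_QFRT_SS] = 1;
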
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>

Changes to be committed:
	modified:   drivers/crypto/hisilicon/Kconfig
	modified:   drivers/crypto/hisilicon/hpre/hpre_crypto.c
	modified:   drivers/crypto/hisilicon/qm.c
	modified:   drivers/crypto/hisilicon/qm.h
	modified:   drivers/crypto/hisilicon/zip/zip_main.c
	modified:   drivers/uacce/Kconfig
	modified:   drivers/uacce/dummy_drv/dummy_wd_dev.c
	modified:   drivers/uacce/dummy_drv/dummy_wd_v2.c
	modified:   drivers/uacce/uacce.c
	modified:   include/linux/uacce.h
Reviewed-by: hucheng.hu <hucheng.hu@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 9d89c01d
......@@ -24,9 +24,6 @@ config CRYPTO_QM_UACCE
help
Support UACCE interface in Hisi QM.
To compile this as a module, choose M here: the module
will be called qm.
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HISI ZIP Driver"
depends on ARM64
......@@ -34,9 +31,6 @@ config CRYPTO_DEV_HISI_ZIP
help
Support for HiSilicon HIP08 ZIP Driver.
To compile this as a module, choose M here: the module
will be called hisi_zip
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE Engine"
depends on ARM64
......@@ -56,10 +50,8 @@ config CRYPTO_DEV_HISI_SEC2
Support for HiSilicon HIP09 SEC Driver.
config CRYPTO_DEV_HISI_RDE
tristate "Support for HISI RDE Driver"
depends on ARM64
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon HIP09 RDE Driver.
tristate "Support for HISI RDE Driver"
depends on ARM64
select CRYPTO_DEV_HISI_QM
help
Support for HiSilicon HIP09 RDE Driver.
......@@ -189,52 +189,81 @@ static struct hisi_qp *hpre_get_qp(void)
return qp;
}
static int _get_data_dma_addr(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
int is_src, dma_addr_t *tmp)
{
enum dma_data_direction dma_dir;
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = &GET_DEV(ctx);
if (is_src) {
hpre_req->src_align = NULL;
dma_dir = DMA_TO_DEVICE;
} else {
hpre_req->dst_align = NULL;
dma_dir = DMA_FROM_DEVICE;
}
*tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
if (unlikely(dma_mapping_error(dev, *tmp))) {
dev_err(dev, "dma map data err!\n");
return -ENOMEM;
}
return 0;
}
static int _cp_data_to_dma_buf(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
int is_src, int is_dh, dma_addr_t *tmp)
{
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = &GET_DEV(ctx);
char *ptr;
int shift;
shift = ctx->key_sz - len;
if (shift < 0)
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
if (unlikely(!ptr)) {
dev_err(dev, "dma alloc data err!\n");
return -ENOMEM;
}
if (is_src) {
scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
if (is_dh)
(void)hpre_bn_format(ptr, ctx->key_sz);
hpre_req->src_align = ptr;
} else {
hpre_req->dst_align = ptr;
}
return 0;
}
static int _hw_data_init(struct hpre_asym_request *hpre_req,
struct scatterlist *data, unsigned int len,
int is_src, int is_dh)
{
struct hpre_sqe *msg = &hpre_req->req;
struct hpre_ctx *ctx = hpre_req->ctx;
struct device *dev = &GET_DEV(ctx);
enum dma_data_direction dma_dir;
dma_addr_t tmp;
char *ptr;
int shift;
int ret;
/* when the data is dh's source, we should format it */
if ((sg_is_last(data) && len == ctx->key_sz) &&
((is_dh && !is_src) || !is_dh)) {
if (is_src) {
hpre_req->src_align = NULL;
dma_dir = DMA_TO_DEVICE;
} else {
hpre_req->dst_align = NULL;
dma_dir = DMA_FROM_DEVICE;
}
tmp = dma_map_single(dev, sg_virt(data),
len, dma_dir);
if (unlikely(dma_mapping_error(dev, tmp))) {
dev_err(dev, "\ndma map data err!");
return -ENOMEM;
}
ret = _get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
if (ret)
return ret;
} else {
shift = ctx->key_sz - len;
if (shift < 0)
return -EINVAL;
ptr = dma_alloc_coherent(dev, ctx->key_sz, &tmp, GFP_KERNEL);
if (unlikely(!ptr)) {
dev_err(dev, "\ndma alloc data err!");
return -ENOMEM;
}
if (is_src) {
scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
if (is_dh)
(void)hpre_bn_format(ptr, ctx->key_sz);
hpre_req->src_align = ptr;
} else {
hpre_req->dst_align = ptr;
}
ret = _cp_data_to_dma_buf(hpre_req, data, len,
is_src, is_dh, &tmp);
if (ret)
return ret;
}
if (is_src) {
msg->low_in = lower_32_bits(tmp);
......@@ -807,6 +836,7 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
struct device *dev = &GET_DEV(ctx);
unsigned int hlf_ksz = ctx->key_sz >> 1;
int ret;
u64 offset;
ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * _CRT_PRMS,
&ctx->rsa.dma_crt_prikey,
......@@ -821,25 +851,29 @@ static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
goto free_key;
/* dp */
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + hlf_ksz, rsa_key->dp,
offset = hlf_ksz;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, rsa_key->dp,
rsa_key->dp_sz, hlf_ksz);
if (ret)
goto free_key;
/* q */
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + hlf_ksz * _CRT_Q,
offset = hlf_ksz * _CRT_Q;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->q, rsa_key->q_sz, hlf_ksz);
if (ret)
goto free_key;
/* p */
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + hlf_ksz * _CRT_P,
offset = hlf_ksz * _CRT_P;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->p, rsa_key->p_sz, hlf_ksz);
if (ret)
goto free_key;
/* qinv */
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + hlf_ksz * _CRT_INV,
offset = hlf_ksz * _CRT_INV;
ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset,
rsa_key->qinv, rsa_key->qinv_sz, hlf_ksz);
if (ret)
goto free_key;
......
......@@ -60,7 +60,7 @@
#define QM_QC_CQE_SIZE 4
/* eqc shift */
#define QM_EQC_EQE_SHIFT 12
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
#define QM_EQE_PHASE(eqe) (((eqe)->dw0 >> 16) & 0x1)
......@@ -1124,19 +1124,15 @@ void hisi_qm_release_qp(struct hisi_qp *qp)
}
EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
{
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
enum qm_hw_ver ver = qm->ver;
struct qm_sqc *sqc;
struct qm_cqc *cqc;
dma_addr_t sqc_dma;
dma_addr_t cqc_dma;
int ret;
qm_init_qp_status(qp);
if (qm->use_dma_api) {
sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
if (!sqc)
......@@ -1171,8 +1167,18 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
} else {
memset(sqc, 0, sizeof(struct qm_sqc));
}
if (ret)
return ret;
return ret;
}
static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
{
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
enum qm_hw_ver ver = qm->ver;
struct qm_cqc *cqc;
dma_addr_t cqc_dma;
int ret;
if (qm->use_dma_api) {
cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
......@@ -1212,6 +1218,19 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
return ret;
}
static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
{
int ret;
qm_init_qp_status(qp);
ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
if (ret)
return ret;
return qm_cq_ctx_cfg(qp, qp_id, pasid);
}
/**
* hisi_qm_start_qp() - Start a qp into running.
* @qp: The qp we want to start to run.
......@@ -1371,15 +1390,8 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
}
}
#ifdef CONFIG_CRYPTO_QM_UACCE
static void qm_qp_event_notifier(struct hisi_qp *qp)
{
uacce_wake_up(qp->uacce_q);
}
static int hisi_qm_get_available_instances(struct uacce *uacce)
int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
{
struct hisi_qm *qm = uacce->priv;
int i, ret;
read_lock(&qm->qps_lock);
......@@ -1393,6 +1405,18 @@ static int hisi_qm_get_available_instances(struct uacce *uacce)
return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
#ifdef CONFIG_CRYPTO_QM_UACCE
static void qm_qp_event_notifier(struct hisi_qp *qp)
{
uacce_wake_up(qp->uacce_q);
}
static int hisi_qm_get_available_instances(struct uacce *uacce)
{
return hisi_qm_get_free_qp_num(uacce->priv);
}
static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
struct uacce_queue **q)
......@@ -1871,6 +1895,8 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_l = lower_32_bits(qm->eqe_dma);
eqc->base_h = upper_32_bits(qm->eqe_dma);
if (qm->ver == QM_HW_V1)
eqc->dw3 = QM_EQE_AEQE_SIZE;
eqc->dw6 = (QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0, 0);
if (qm->use_dma_api) {
......
......@@ -313,6 +313,7 @@ int hisi_qm_stop_qp(struct hisi_qp *qp);
void hisi_qm_release_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
int hisi_qp_wait(struct hisi_qp *qp);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
int hisi_qm_debug_init(struct hisi_qm *qm);
......
......@@ -89,23 +89,61 @@ static struct dentry *hzip_debugfs_root;
LIST_HEAD(hisi_zip_list);
DEFINE_MUTEX(hisi_zip_list_lock);
struct hisi_zip_resource {
struct hisi_zip *hzip;
int distance;
struct list_head list;
};
static void free_list(struct list_head *head)
{
struct hisi_zip_resource *res, *tmp;
list_for_each_entry_safe(res, tmp, head, list) {
list_del(&res->list);
kfree(res);
}
}
struct hisi_zip *find_zip_device(int node)
{
struct hisi_zip *ret = NULL;
#ifdef CONFIG_NUMA
struct hisi_zip_resource *res, *tmp;
struct hisi_zip *hisi_zip;
int min_distance = HZIP_NUMA_DISTANCE;
struct list_head *n;
struct device *dev;
LIST_HEAD(head);
mutex_lock(&hisi_zip_list_lock);
list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
goto err;
dev = &hisi_zip->qm.pdev->dev;
if (node_distance(dev->numa_node, node) < min_distance) {
ret = hisi_zip;
min_distance = node_distance(dev->numa_node, node);
res->hzip = hisi_zip;
res->distance = node_distance(dev->numa_node, node);
n = &head;
list_for_each_entry(tmp, &head, list) {
if (res->distance < tmp->distance) {
n = &tmp->list;
break;
}
}
list_add_tail(&res->list, n);
}
list_for_each_entry(tmp, &head, list) {
if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
ret = tmp->hzip;
break;
}
}
free_list(&head);
#else
mutex_lock(&hisi_zip_list_lock);
......@@ -114,6 +152,10 @@ struct hisi_zip *find_zip_device(int node)
mutex_unlock(&hisi_zip_list_lock);
return ret;
err:
free_list(&head);
return NULL;
}
struct hisi_zip_hw_error {
......@@ -289,18 +331,18 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
/* qm user domain */
writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
QM_ARUSER_M_CFG_ENABLE);
QM_ARUSER_M_CFG_ENABLE);
writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_1);
writel(AWUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
QM_AWUSER_M_CFG_ENABLE);
QM_AWUSER_M_CFG_ENABLE);
writel(WUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
QM_WUSER_M_CFG_ENABLE);
QM_WUSER_M_CFG_ENABLE);
/* qm cache */
writel(CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
writel(CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_1);
writel(CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_0);
writel(CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_1);
writel(AXI_M_CFG, hisi_zip->qm.io_base + QM_AXI_M_CFG);
writel(AXI_M_CFG_ENABLE, hisi_zip->qm.io_base + QM_AXI_M_CFG_ENABLE);
writel(PEH_AXUSER_CFG_ENABLE, hisi_zip->qm.io_base +
QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
writel(CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
......@@ -315,26 +357,25 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
if (qm->use_sva) {
writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
HZIP_DATA_RUSER_32_63);
HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
HZIP_DATA_WUSER_32_63);
HZIP_DATA_WUSER_32_63);
} else {
writel(AXUSER_BASE, hisi_zip->qm.io_base +
HZIP_DATA_RUSER_32_63);
HZIP_DATA_RUSER_32_63);
writel(AXUSER_BASE, hisi_zip->qm.io_base +
HZIP_DATA_WUSER_32_63);
HZIP_DATA_WUSER_32_63);
}
/* let's open all compression/decompression cores */
writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
hisi_zip->qm.io_base + HZIP_CLOCK_GATE_CTRL);
hisi_zip->qm.io_base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1),
hisi_zip->qm.io_base + QM_CACHE_CTL);
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1),
hisi_zip->qm.io_base + QM_CACHE_CTL);
}
static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
......
......@@ -12,7 +12,6 @@ menuconfig UACCE
config WD_DUMMY_DEV
tristate "Support for WrapDrive Dummy Device"
depends on ARM64
depends on UACCE
help
Support for WarpDrive test driver with devices (NOT for upstream).
......
......@@ -233,7 +233,7 @@ int queue_worker(void *data)
do {
_queue_work(hwq);
schedule_timeout_interruptible(
msecs_to_jiffies(QUEUE_YEILD_MS));
msecs_to_jiffies(QUEUE_YEILD_MS));
} while (!kthread_should_stop());
hwq->work_thread = NULL;
......@@ -321,6 +321,12 @@ static int dummy_wd_probe(struct platform_device *pdev)
uacce->ops = &dummy_ops;
uacce->drv_name = DUMMY_WD;
uacce->algs = "memcpy\n";
uacce->api_ver = "dummy_v1";
uacce->flags = UACCE_DEV_NOIOMMU;
uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
uacce->qf_pg_start[UACCE_QFRT_DKO] = UACCE_QFR_NA;
uacce->qf_pg_start[UACCE_QFRT_DUS] = UACCE_QFR_NA;
uacce->qf_pg_start[UACCE_QFRT_SS] = 1;
#ifdef CONFIG_NUMA
/*
......
......@@ -106,8 +106,8 @@ static bool dummy_wd2_iommu_capable(enum iommu_cap cap)
}
}
static struct iommu_domain *dummy_wd2_iommu_domain_alloc(unsigned
iommu_domain_type);
static struct iommu_domain *dummy_wd2_iommu_domain_alloc(
unsigned int iommu_domain_type);
static void dummy_wd2_iommu_domain_free(struct iommu_domain *domain);
static int dummy_wd2_iommu_attach_dev(struct iommu_domain *domain,
......@@ -152,9 +152,11 @@ static int dummy_wd2_iommu_map(struct iommu_domain *domain, unsigned long iova,
dev_dbg(&d->hw->dummy_wd2_dev,
"iommu_map %d asid=%lld, %llx=>%llx\n", i,
d->hw->pt[i].asid,
d->hw->pt[i].iova, d->hw->pt[i].pa);
d->hw->pt[i].iova,
d->hw->pt[i].pa);
/* flush to hardware */
writeq(MAX_PT_ENTRIES, d->hw->io_base + DUMMY2_IO_PTSZ);
writeq(MAX_PT_ENTRIES,
d->hw->io_base + DUMMY2_IO_PTSZ);
return 0;
}
}
......@@ -176,10 +178,12 @@ static size_t dummy_wd2_iommu_unmap(struct iommu_domain *domain,
dev_dbg(&d->hw->dummy_wd2_dev,
"iommu_unmap %d asid=%lld, %llx=>%llx\n", i,
d->hw->pt[i].asid,
d->hw->pt[i].iova, d->hw->pt[i].pa);
d->hw->pt[i].iova,
d->hw->pt[i].pa);
d->hw->pt[i].asid = (uint64_t)-1;
/* flush to hardware */
writeq(MAX_PT_ENTRIES, d->hw->io_base + DUMMY2_IO_PTSZ);
writeq(MAX_PT_ENTRIES,
d->hw->io_base + DUMMY2_IO_PTSZ);
return DUMMY2_DMA_PAGE_SIZE;
}
}
......@@ -198,11 +202,11 @@ static struct iommu_ops dummy_wd2_iommu_ops = {
.pgsize_bitmap = SZ_4K,
};
static struct iommu_domain *dummy_wd2_iommu_domain_alloc(unsigned
iommu_domain_type)
static struct iommu_domain *dummy_wd2_iommu_domain_alloc(
unsigned int iommu_domain_type)
{
struct dummy_wd2_iommu_domain *domain =
kzalloc(sizeof(struct iommu_domain), GFP_KERNEL);
struct dummy_wd2_iommu_domain *domain = kzalloc(
sizeof(struct iommu_domain), GFP_KERNEL);
if (domain)
domain->domain.ops = &dummy_wd2_iommu_ops;
......@@ -217,10 +221,10 @@ static void dummy_wd2_iommu_domain_free(struct iommu_domain *domain)
}
static struct bus_type dummy_wd2_bus_type = {
.name = "dummy_wd2_bus",
.probe = dummy_wd2_bus_probe,
.remove = dummy_wd2_bus_remove,
.iommu_ops = &dummy_wd2_iommu_ops,
.name = "dummy_wd2_bus",
.probe = dummy_wd2_bus_probe,
.remove = dummy_wd2_bus_remove,
.iommu_ops = &dummy_wd2_iommu_ops,
};
static int dummy_wd2_is_q_updated(struct uacce_queue *q)
......@@ -229,7 +233,7 @@ static int dummy_wd2_is_q_updated(struct uacce_queue *q)
}
static int dummy_wd2_get_queue(struct uacce *uacce, unsigned long arg,
struct uacce_queue **q)
struct uacce_queue **q)
{
int i;
struct dummy_wd2_hw *hw = (struct dummy_wd2_hw *)uacce->priv;
......@@ -262,7 +266,7 @@ static void dummy_wd2_put_queue(struct uacce_queue *q)
}
static int dummy_wd2_mmap(struct uacce_queue *q, struct vm_area_struct *vma,
struct uacce_qfile_region *qfr)
struct uacce_qfile_region *qfr)
{
struct dummy_wd2_hw_queue *hwq = (struct dummy_wd2_hw_queue *)q->priv;
......@@ -271,7 +275,7 @@ static int dummy_wd2_mmap(struct uacce_queue *q, struct vm_area_struct *vma,
return -EINVAL;
return remap_pfn_range(vma, vma->vm_start,
(u64) hwq->db_pa >> PAGE_SHIFT,
(u64)hwq->db_pa >> PAGE_SHIFT,
DUMMY2_DMA_PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot));
}
......@@ -378,8 +382,8 @@ static int dummy_wd2_probe(struct platform_device *pdev)
return PTR_ERR(hw->io_base);
hw->pt = dmam_alloc_coherent(dev,
sizeof(struct pt_entry) * MAX_PT_ENTRIES,
&hw->pt_dma, GFP_KERNEL);
sizeof(struct pt_entry) * MAX_PT_ENTRIES,
&hw->pt_dma, GFP_KERNEL);
if (!hw->pt)
return -ENOMEM;
......@@ -393,7 +397,7 @@ static int dummy_wd2_probe(struct platform_device *pdev)
}
dev_info(dev, "v2 device (%llx, %llx), header: %llx\n",
(u64) hw->pt, hw->pt_dma, readq(hw->io_base + DUMMY2_IO_TAG));
(u64)hw->pt, hw->pt_dma, readq(hw->io_base + DUMMY2_IO_TAG));
/* set page tables */
writeq(hw->pt_dma, hw->io_base + DUMMY2_IO_PTPA);
......@@ -401,9 +405,9 @@ static int dummy_wd2_probe(struct platform_device *pdev)
for (i = 0; i < RING_NUM; i++) {
hw->qs[i].used = false;
hw->qs[i].db_pa = (void __iomem *)res->start +
((i + 1) << DUMMY2_DMA_PAGE_SHIFT);
((i+1)<<DUMMY2_DMA_PAGE_SHIFT);
hw->qs[i].ring_io_base = hw->io_base + DUMMY2_IO_RING_BEGIN +
sizeof(struct ring_io) * i;
sizeof(struct ring_io) * i;
hw->qs[i].hw = hw;
hw->qs[i].q.priv = &hw->qs[i];
}
......@@ -447,6 +451,13 @@ static int dummy_wd2_probe(struct platform_device *pdev)
uacce->ops = &dummy_wd2_ops;
uacce->drv_name = DUMMY2_WD;
uacce->algs = "memcpy\n";
uacce->api_ver = "dummy_v2";
uacce->flags = 0;
uacce->qf_pg_start[UACCE_QFRT_MMIO] = 0;
uacce->qf_pg_start[UACCE_QFRT_DKO] = UACCE_QFR_NA;
uacce->qf_pg_start[UACCE_QFRT_DUS] = 1;
uacce->qf_pg_start[UACCE_QFRT_SS] = 2;
ret = uacce_register(uacce);
if (ret) {
dev_warn(uacce->pdev, "uacce register fail %d\n", ret);
......@@ -455,9 +466,9 @@ static int dummy_wd2_probe(struct platform_device *pdev)
return 0;
err_with_group:
err_with_group:
iommu_group_put(hw->iommu_group);
err_with_device:
err_with_device:
put_device(&hw->dummy_wd2_dev);
return ret;
}
......@@ -471,11 +482,11 @@ static int dummy_wd2_remove(struct platform_device *pdev)
}
static struct platform_driver dummy_wd2_pdrv = {
.probe = dummy_wd2_probe,
.remove = dummy_wd2_remove,
.driver = {
.name = DUMMY2_WD,
},
.probe = dummy_wd2_probe,
.remove = dummy_wd2_remove,
.driver = {
.name = DUMMY2_WD,
},
};
static int __init dummy_wd2_init(void)
......
......@@ -25,7 +25,7 @@
static struct class *uacce_class;
static DEFINE_IDR(uacce_idr);
static dev_t uacce_devt;
static DEFINE_MUTEX(uacce_mutex); /* mutex to protect uacce */
static DEFINE_MUTEX(uacce_mutex); /* mutex to protect uacce */
/* lock to protect all queues management */
#ifdef CONFIG_UACCE_FIX_MMAP
......@@ -122,8 +122,8 @@ void uacce_wake_up(struct uacce_queue *q)
}
EXPORT_SYMBOL_GPL(uacce_wake_up);
static inline int
uacce_iommu_map_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
static inline int uacce_iommu_map_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr)
{
struct device *dev = q->uacce->pdev;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
......@@ -145,7 +145,7 @@ uacce_iommu_map_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
return 0;
err_with_map_pages:
err_with_map_pages:
for (j = i - 1; j >= 0; j--) {
iommu_unmap(domain, qfr->iova + j * PAGE_SIZE, PAGE_SIZE);
put_page(qfr->pages[j]);
......@@ -153,8 +153,8 @@ uacce_iommu_map_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
return ret;
}
static inline void
uacce_iommu_unmap_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
static inline void uacce_iommu_unmap_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr)
{
struct device *dev = q->uacce->pdev;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
......@@ -169,8 +169,8 @@ uacce_iommu_unmap_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
}
}
static int
uacce_queue_map_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
static int uacce_queue_map_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr)
{
if (!(qfr->flags & UACCE_QFRF_MAP) || (qfr->flags & UACCE_QFRF_DMA))
return 0;
......@@ -181,8 +181,8 @@ uacce_queue_map_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
return uacce_iommu_map_qfr(q, qfr);
}
static void
uacce_queue_unmap_qfr(struct uacce_queue *q, struct uacce_qfile_region *qfr)
static void uacce_queue_unmap_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr)
{
if (!(qfr->flags & UACCE_QFRF_MAP) || (qfr->flags & UACCE_QFRF_DMA))
return;
......@@ -221,7 +221,7 @@ static vm_fault_t uacce_shm_vm_fault(struct vm_fault *vmf)
vmf->page = qfr->pages[page_offset];
ret = 0;
out:
out:
uacce_qs_runlock();
return ret;
}
......@@ -247,7 +247,7 @@ static int uacce_qfr_alloc_pages(struct uacce_qfile_region *qfr)
return 0;
err_with_pages:
err_with_pages:
for (j = i - 1; j >= 0; j--)
put_page(qfr->pages[j]);
......@@ -265,9 +265,9 @@ static void uacce_qfr_free_pages(struct uacce_qfile_region *qfr)
kfree(qfr->pages);
}
static inline int
uacce_queue_mmap_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr, struct vm_area_struct *vma)
static inline int uacce_queue_mmap_qfr(struct uacce_queue *q,
struct uacce_qfile_region *qfr,
struct vm_area_struct *vma)
{
#ifdef CONFIG_UACCE_FIX_MMAP
int i, ret;
......@@ -293,8 +293,8 @@ uacce_queue_mmap_qfr(struct uacce_queue *q,
}
static struct uacce_qfile_region *uacce_create_region(struct uacce_queue *q,
struct vm_area_struct *vma,
enum uacce_qfrt type, u32 flags)
struct vm_area_struct *vma,
enum uacce_qfrt type, u32 flags)
{
struct uacce_qfile_region *qfr;
struct uacce *uacce = q->uacce;
......@@ -327,9 +327,9 @@ static struct uacce_qfile_region *uacce_create_region(struct uacce_queue *q,
/* allocate memory */
if (flags & UACCE_QFRF_DMA) {
dev_dbg(uacce->pdev, "allocate dma %d pages\n", qfr->nr_pages);
qfr->kaddr = dma_alloc_coherent(uacce->pdev,
qfr->nr_pages << PAGE_SHIFT,
&qfr->dma, GFP_KERNEL);
qfr->kaddr = dma_alloc_coherent(uacce->pdev, qfr->nr_pages <<
PAGE_SHIFT, &qfr->dma,
GFP_KERNEL);
if (!qfr->kaddr) {
ret = -ENOMEM;
goto err_with_qfr;
......@@ -370,23 +370,23 @@ static struct uacce_qfile_region *uacce_create_region(struct uacce_queue *q,
return qfr;
err_with_mapped_qfr:
err_with_mapped_qfr:
uacce_queue_unmap_qfr(q, qfr);
err_with_pages:
err_with_pages:
if (flags & UACCE_QFRF_DMA)
dma_free_coherent(uacce->pdev, qfr->nr_pages << PAGE_SHIFT,
qfr->kaddr, qfr->dma);
else
uacce_qfr_free_pages(qfr);
err_with_qfr:
err_with_qfr:
kfree(qfr);
return ERR_PTR(ret);
}
/* we assume you have uacce_queue_unmap_qfr(q, qfr) from all related queues */
static void
uacce_destroy_region(struct uacce_queue *q, struct uacce_qfile_region *qfr)
static void uacce_destroy_region(struct uacce_queue *q,
struct uacce_qfile_region *qfr)
{
struct uacce *uacce = q->uacce;
......@@ -444,9 +444,9 @@ static long uacce_cmd_share_qfr(struct uacce_queue *tgt, int fd)
list_add(&tgt->list, &src->qfrs[UACCE_QFRT_SS]->qs);
ret = 0;
out_with_lock:
out_with_lock:
uacce_qs_wunlock();
out_with_fd:
out_with_fd:
fput(filep);
return ret;
}
......@@ -464,9 +464,8 @@ static int uacce_start_queue(struct uacce_queue *q)
for (i = 0; i < UACCE_QFRT_MAX; i++) {
qfr = q->qfrs[i];
if (qfr && (qfr->flags & UACCE_QFRF_KMAP) && !qfr->kaddr) {
qfr->kaddr =
vmap(qfr->pages, qfr->nr_pages, VM_MAP,
PAGE_KERNEL);
qfr->kaddr = vmap(qfr->pages, qfr->nr_pages, VM_MAP,
PAGE_KERNEL);
if (!qfr->kaddr) {
ret = -ENOMEM;
dev_dbg(dev, "fail to kmap %s qfr(%d pages)\n",
......@@ -488,7 +487,7 @@ static int uacce_start_queue(struct uacce_queue *q)
atomic_set(&q->uacce->state, UACCE_ST_STARTED);
return 0;
err_with_vmap:
err_with_vmap:
for (j = i; j >= 0; j--) {
qfr = q->qfrs[j];
if (qfr && qfr->kaddr) {
......@@ -522,8 +521,8 @@ static long uacce_get_ss_dma(struct uacce_queue *q, void __user *arg)
return ret;
}
static long
uacce_fops_unl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
static long uacce_fops_unl_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
struct uacce_queue *q = filep->private_data;
struct uacce *uacce = q->uacce;
......@@ -548,8 +547,8 @@ uacce_fops_unl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
static long
uacce_fops_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
static long uacce_fops_compat_ioctl(struct file *filep,
unsigned int cmd, unsigned long arg)
{
arg = (unsigned long)compat_ptr(arg);
return uacce_fops_unl_ioctl(filep, cmd, arg);
......@@ -635,6 +634,10 @@ static int uacce_fops_flush(struct file *filep, fl_owner_t id)
struct uacce_queue *q = filep->private_data;
struct uacce *uacce = q->uacce;
if (UACCE_ST_INIT ==
atomic_cmpxchg(&q->status, UACCE_ST_OPENNED, UACCE_ST_INIT))
return 0;
/*
* It is different between CI and kernel-dev here, so delete list
* entry in flush callback and release callback. After flush is called
......@@ -646,9 +649,6 @@ static int uacce_fops_flush(struct file *filep, fl_owner_t id)
list_del(&q->q_dev);
mutex_unlock(&uacce->q_lock);
/* filep->private_date is still used by above uacce_fops_fasync */
filep->private_data = NULL;
return uacce_queue_drain(q);
}
......@@ -687,6 +687,7 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
goto open_err;
atomic_inc(&uacce->ref);
atomic_set(&q->status, UACCE_ST_OPENNED);
q->pasid = pasid;
q->uacce = uacce;
q->mm = current->mm;
......@@ -708,8 +709,8 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
/* task has put the queue */
if (!q)
if (UACCE_ST_INIT ==
atomic_cmpxchg(&q->status, UACCE_ST_OPENNED, UACCE_ST_INIT))
return 0;
uacce_fops_fasync(-1, filep, 0);
......@@ -723,8 +724,8 @@ static int uacce_fops_release(struct inode *inode, struct file *filep)
return uacce_queue_drain(q);
}
static enum uacce_qfrt
uacce_get_region_type(struct uacce *uacce, struct vm_area_struct *vma)
static enum uacce_qfrt uacce_get_region_type(struct uacce *uacce,
struct vm_area_struct *vma)
{
enum uacce_qfrt type = UACCE_QFRT_MAX;
int i;
......@@ -781,8 +782,7 @@ uacce_get_region_type(struct uacce *uacce, struct vm_area_struct *vma)
if (vma_pages(vma) !=
next_start - uacce->qf_pg_start[type]) {
dev_err(&uacce->dev,
"invalid mmap size "
dev_err(&uacce->dev, "invalid mmap size "
"(%ld vs %ld pages) for region %s.\n",
vma_pages(vma),
next_start - uacce->qf_pg_start[type],
......@@ -877,7 +877,7 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
return 0;
out_with_lock:
out_with_lock:
uacce_qs_wunlock();
return ret;
}
......@@ -902,44 +902,42 @@ static int uacce_fops_fasync(int fd, struct file *file, int mode)
}
static const struct file_operations uacce_fops = {
.owner = THIS_MODULE,
.open = uacce_fops_open,
.flush = uacce_fops_flush,
.release = uacce_fops_release,
.unlocked_ioctl = uacce_fops_unl_ioctl,
.owner = THIS_MODULE,
.open = uacce_fops_open,
.flush = uacce_fops_flush,
.release = uacce_fops_release,
.unlocked_ioctl = uacce_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = uacce_fops_compat_ioctl,
.compat_ioctl = uacce_fops_compat_ioctl,
#endif
.mmap = uacce_fops_mmap,
.poll = uacce_fops_poll,
.fasync = uacce_fops_fasync,
.mmap = uacce_fops_mmap,
.poll = uacce_fops_poll,
.fasync = uacce_fops_fasync,
};
#define UACCE_FROM_CDEV_ATTR(dev) container_of(dev, struct uacce, dev)
static ssize_t
uacce_dev_show_id(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
return sprintf(buf, "%d\n", uacce->dev_id);
}
static DEVICE_ATTR(id, S_IRUGO, uacce_dev_show_id, NULL);
static ssize_t
uacce_dev_show_api(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_api(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
return sprintf(buf, "%s\n", uacce->api_ver);
}
static DEVICE_ATTR(api, S_IRUGO, uacce_dev_show_api, NULL);
static ssize_t
uacce_dev_show_numa_distance(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_numa_distance(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
int distance = 0;
......@@ -949,12 +947,11 @@ uacce_dev_show_numa_distance(struct device *dev,
#endif
return sprintf(buf, "%d\n", abs(distance));
}
static DEVICE_ATTR(numa_distance, S_IRUGO, uacce_dev_show_numa_distance, NULL);
static ssize_t
uacce_dev_show_node_id(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_node_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
int node_id = -1;
......@@ -964,46 +961,42 @@ uacce_dev_show_node_id(struct device *dev,
#endif
return sprintf(buf, "%d\n", node_id);
}
static DEVICE_ATTR(node_id, S_IRUGO, uacce_dev_show_node_id, NULL);
static ssize_t
uacce_dev_show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_flags(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
return sprintf(buf, "%d\n", uacce->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, uacce_dev_show_flags, NULL);
static ssize_t
uacce_dev_show_available_instances(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_available_instances(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
return sprintf(buf, "%d\n", uacce->ops->get_available_instances(uacce));
}
static DEVICE_ATTR(available_instances, S_IRUGO,
uacce_dev_show_available_instances, NULL);
static ssize_t
uacce_dev_show_algorithms(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uacce_dev_show_algorithms(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
return sprintf(buf, "%s", uacce->algs);
}
static DEVICE_ATTR(algorithms, S_IRUGO, uacce_dev_show_algorithms, NULL);
static ssize_t uacce_dev_show_qfrs_offset(struct device *dev,
struct device_attribute *attr,
char *buf)
struct device_attribute *attr,
char *buf)
{
struct uacce *uacce = UACCE_FROM_CDEV_ATTR(dev);
int i, ret;
......@@ -1036,8 +1029,8 @@ static struct attribute *uacce_dev_attrs[] = {
};
static const struct attribute_group uacce_dev_attr_group = {
.name = UACCE_DEV_ATTRS,
.attrs = uacce_dev_attrs,
.name = UACCE_DEV_ATTRS,
.attrs = uacce_dev_attrs,
};
static const struct attribute_group *uacce_dev_attr_groups[] = {
......@@ -1069,7 +1062,7 @@ static int uacce_create_chrdev(struct uacce *uacce)
dev_dbg(&uacce->dev, "create uacce minior=%d\n", uacce->dev_id);
return 0;
err_with_idr:
err_with_idr:
idr_remove(&uacce_idr, uacce->dev_id);
return ret;
}
......@@ -1101,8 +1094,8 @@ static int uacce_dev_match(struct device *dev, void *data)
}
/* Borrowed from VFIO */
static bool
uacce_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
static bool uacce_iommu_has_sw_msi(struct iommu_group *group,
phys_addr_t *base)
{
struct list_head group_resv_regions;
struct iommu_resv_region *region, *next;
......@@ -1130,7 +1123,7 @@ uacce_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
}
}
list_for_each_entry_safe(region, next, &group_resv_regions, list)
kfree(region);
kfree(region);
return ret;
}
......@@ -1188,14 +1181,14 @@ static int uacce_set_iommu_domain(struct uacce *uacce)
if (resv_msi) {
if (!irq_domain_check_msi_remap() &&
!iommu_capable(dev->bus, IOMMU_CAP_INTR_REMAP)) {
!iommu_capable(dev->bus, IOMMU_CAP_INTR_REMAP)) {
dev_err(dev, "No interrupt remapping support!\n");
ret = -EPERM;
goto err_with_domain;
}
dev_dbg(dev, "Set resv msi %llx on iommu domain!\n",
(u64) resv_msi_base);
(u64)resv_msi_base);
ret = iommu_get_msi_cookie(domain, resv_msi_base);
if (ret) {
dev_err(dev, "fail to get msi cookie from domain!\n");
......@@ -1205,7 +1198,7 @@ static int uacce_set_iommu_domain(struct uacce *uacce)
return 0;
err_with_domain:
err_with_domain:
iommu_domain_free(domain);
return ret;
}
......@@ -1237,22 +1230,20 @@ int uacce_register(struct uacce *uacce)
int ret;
struct device *dev = uacce->pdev;
if (!uacce->pdev) {
if (!dev) {
pr_err("uacce parent device not set\n");
return -ENODEV;
}
if (uacce->flags & UACCE_DEV_NOIOMMU) {
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
dev_warn(dev, "register to noiommu mode, "
"this may export kernel data to user space and "
"open the kernel for user attacked");
dev_warn(dev, "register to noiommu mode, this may be attacked\n");
}
/* if dev support fault-from-dev, it should support pasid */
if ((uacce->flags & UACCE_DEV_FAULT_FROM_DEV) &&
!(uacce->flags & UACCE_DEV_PASID)) {
dev_warn(dev, "SVM/SAV device should support PASID\n");
dev_err(dev, "SVM/SVA device should support PASID\n");
return -EINVAL;
}
......@@ -1261,7 +1252,7 @@ int uacce_register(struct uacce *uacce)
if (!uacce->ops->get_available_instances)
uacce->ops->get_available_instances =
uacce_default_get_available_instances;
uacce_default_get_available_instances;
#ifndef CONFIG_IOMMU_SVA
ret = uacce_set_iommu_domain(uacce);
......@@ -1287,8 +1278,7 @@ int uacce_register(struct uacce *uacce)
goto err_with_lock;
}
#else
uacce->flags &=
~(UACCE_DEV_FAULT_FROM_DEV | UACCE_DEV_PASID);
uacce->flags &= ~(UACCE_DEV_FAULT_FROM_DEV | UACCE_DEV_PASID);
#endif
}
......@@ -1300,7 +1290,7 @@ int uacce_register(struct uacce *uacce)
mutex_unlock(&uacce_mutex);
return 0;
err_with_lock:
err_with_lock:
mutex_unlock(&uacce_mutex);
return ret;
}
......@@ -1348,9 +1338,9 @@ static int __init uacce_init(void)
return 0;
err_with_class:
err_with_class:
class_destroy(uacce_class);
err:
err:
return ret;
}
......
......@@ -46,28 +46,29 @@ struct uacce_qfile_region {
struct uacce_ops {
int (*get_available_instances)(struct uacce *uacce);
int (*get_queue)(struct uacce *uacce, unsigned long arg,
struct uacce_queue **q);
struct uacce_queue **q);
void (*put_queue)(struct uacce_queue *q);
int (*start_queue)(struct uacce_queue *q);
void (*stop_queue)(struct uacce_queue *q);
int (*is_q_updated)(struct uacce_queue *q);
void (*mask_notify)(struct uacce_queue *q, int event_mask);
int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma,
struct uacce_qfile_region *qfr);
struct uacce_qfile_region *qfr);
int (*reset)(struct uacce *uacce);
int (*reset_queue)(struct uacce_queue *q);
long (*ioctl)(struct uacce_queue *q, unsigned int cmd,
unsigned long arg);
unsigned long arg);
};
struct uacce_queue {
struct uacce *uacce;
__u32 flags;
atomic_t status;
void *priv;
wait_queue_head_t wait;
int pasid;
struct list_head list; /* as list for as->qs */
struct list_head list; /* as list for as->qs */
struct mm_struct *mm;
......