Commit bfed4439 authored by lingmingqiang, committed by Xie XiuQi

Revert the commit related to warpdrive based on vfio-mdev

driver inclusion
category: bugfix
bugzilla: NA
CVE: NA

This reverts commit 53c9f99d845eab27a0ab7d65f825cd874b661688.

Changes to be committed:
	modified:   drivers/crypto/hisilicon/Kconfig
	modified:   drivers/vfio/spimdev/vfio_spimdev.c
	modified:   samples/warpdrive/wd.c

Feature or Bugfix: Bugfix
Signed-off-by: lingmingqiang <lingmingqiang@huawei.com>
Reviewed-by: hucheng.hu <hucheng.hu@huawei.com>
Reviewed-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 510fef4d
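For context, the core of the vfio_spimdev.c change below is the queue-pool bookkeeping: the reverted code marked each pool entry free or busy with atomic_cmpxchg() on an atomic_t, while the restored code uses a plain bool guarded by the per-mdev mutex and holds that lock across the whole lookup. A minimal userspace sketch of the restored pattern follows; the names queue_pool, pool_get and pool_put are illustrative only and do not appear in the driver.

/*
 * Sketch of the restored allocation scheme: a plain bool per pool slot,
 * protected by one mutex, instead of atomic_cmpxchg() on an atomic_t flag.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE 4

struct pool_entry {
	int q;			/* stand-in for struct vfio_spimdev_queue * */
	bool is_free;
};

struct queue_pool {
	pthread_mutex_t lock;	/* plays the role of the per-mdev lock */
	struct pool_entry entries[POOL_SIZE];
};

/* Grab the first free entry; the whole scan runs under the lock. */
static int pool_get(struct queue_pool *p)
{
	pthread_mutex_lock(&p->lock);
	for (size_t i = 0; i < POOL_SIZE; i++) {
		if (p->entries[i].is_free) {
			p->entries[i].is_free = false;
			pthread_mutex_unlock(&p->lock);
			return (int)i;
		}
	}
	pthread_mutex_unlock(&p->lock);
	return -1;		/* nothing free, like the -ENODEV path */
}

/* Return an entry; a double put is reported, like the -EEXIST path. */
static int pool_put(struct queue_pool *p, int idx)
{
	int ret = 0;

	pthread_mutex_lock(&p->lock);
	if (p->entries[idx].is_free)
		ret = -1;
	else
		p->entries[idx].is_free = true;
	pthread_mutex_unlock(&p->lock);
	return ret;
}

int main(void)
{
	struct queue_pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (size_t i = 0; i < POOL_SIZE; i++)
		p.entries[i] = (struct pool_entry){ .q = (int)i, .is_free = true };

	int idx = pool_get(&p);
	int first = pool_put(&p, idx);
	int second = pool_put(&p, idx);

	printf("got slot %d, first put %d, second put %d\n", idx, first, second);
	return 0;
}

One design note: with a single mutex, the scan, the reset_queue() call and the free/busy transition form one critical section, at the cost of serializing all pool lookups.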
drivers/crypto/hisilicon/Kconfig
@@ -35,8 +35,6 @@ config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE Engine"
depends on ARM64
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
help
Support for HiSilicon HIP09 HPRE engine.
drivers/vfio/spimdev/vfio_spimdev.c
@@ -7,7 +7,7 @@
struct _mdev_pool_entry {
struct vfio_spimdev_queue *q;
atomic_t is_free;
bool is_free;
};
struct _spimdev {
@@ -76,6 +76,7 @@ static const struct vfio_iommu_driver_ops vfio_spimdev_iommu_ops = {
static void _spimdev_get(struct _spimdev *dev)
{
atomic_inc(&dev->ref);
dev->pid = current->pid;
}
@@ -220,12 +221,15 @@ static ssize_t mdev_get_show(struct device *dev,
if (!spimdev)
return -ENODEV;
mdev_state = spimdev->mstate;
mutex_lock(&mdev_state->lock);
list_for_each_entry(mdev, &mdev_state->mdev_list, next) {
if (atomic_cmpxchg(&mdev->ref, 0, 1))
if (atomic_read(&mdev->ref))
continue;
_spimdev_get(mdev);
mutex_unlock(&mdev_state->lock);
return sprintf(buf, "%s_%d\n", mdev->mdev, mdev->group_id);
}
mutex_unlock(&mdev_state->lock);
return -ENODEV;
}
@@ -342,7 +346,7 @@ static void *_mdev_create_qpool(struct vfio_spimdev *spimdev,
ret = spimdev->ops->get_queue(spimdev, alg, &pool[0].q);
if (ret < 0)
return NULL;
atomic_set(&pool[0].is_free, 1);
pool[0].is_free = true;
return pool;
} else if (spimdev->flags & VFIO_SPIMDEV_DIFF_ALG_QFLG) {
@@ -358,7 +362,7 @@ static void *_mdev_create_qpool(struct vfio_spimdev *spimdev,
ret = spimdev->ops->get_queue(spimdev, alg, &pool[i].q);
if (ret < 0)
goto create_pool_fail;
atomic_set(&pool[i].is_free, 1);
pool[i].is_free = true;
}
return pool;
} else
@@ -378,7 +382,7 @@ static void _mdev_destroy_qpool(struct vfio_spimdev *spimdev,
int i = 0;
/* all the pool queues should be free, while remove mdev */
while (atomic_read(&pool[i].is_free) && pool[i].q) {
while (pool[i].is_free && pool[i].q) {
spimdev->ops->put_queue(pool[i].q);
i++;
}
@@ -501,29 +505,31 @@ static int _get_queue_from_pool(struct mdev_device *mdev, const char *alg,
if (!spimdev)
return -ENODEV;
pool = smdev->pool;
mutex_lock(&smdev->lock);
if (spimdev->flags & VFIO_SPIMDEV_SAME_ALG_QFLG) {
if (atomic_cmpxchg(&pool[0].is_free, 1, 0)) {
if (pool[0].is_free) {
*q = pool[0].q;
if (spimdev->ops->reset_queue)
(void)spimdev->ops->reset_queue(*q);
pool[0].is_free = false;
mutex_unlock(&smdev->lock);
return 0;
}
mutex_unlock(&smdev->lock);
return -ENODEV;
}
groups = spimdev->mdev_fops.supported_type_groups;
while (groups[i]) {
if (atomic_cmpxchg(&pool[i].is_free, 1, 0) &&
!strncmp(groups[i]->name, alg, strlen(alg))) {
if (pool[i].is_free && !strncmp(groups[i]->name, alg,
strlen(alg))) {
*q = pool[i].q;
if (spimdev->ops->reset_queue)
(void)spimdev->ops->reset_queue(*q);
pool[i].is_free = false;
mutex_unlock(&smdev->lock);
return 0;
}
i++;
}
mutex_unlock(&smdev->lock);
return -ENODEV;
}
@@ -547,20 +553,39 @@ static int _put_queue_to_pool(struct mdev_device *mdev,
return -ENODEV;
groups = spimdev->mdev_fops.supported_type_groups;
pool = smdev->pool;
mutex_lock(&smdev->lock);
if (spimdev->flags & VFIO_SPIMDEV_SAME_ALG_QFLG) {
if (pool[0].q == q && !atomic_cmpxchg(&pool[0].is_free, 0, 1))
if (pool[0].is_free) {
mutex_unlock(&smdev->lock);
return -EEXIST;
} else if (pool[0].q == q) {
pool[0].is_free = true;
if (spimdev->ops->reset_queue)
(void)spimdev->ops->reset_queue(q);
mutex_unlock(&smdev->lock);
return 0;
}
mutex_unlock(&smdev->lock);
return -EEXIST;
}
while (groups[i]) {
if (!strncmp(groups[i]->name, q->alg, strlen(q->alg))) {
if (pool[i].q == q && !atomic_cmpxchg(&pool[i].is_free,
0, 1))
if (pool[i].is_free) {
continue;
} else if (pool[i].q == q) {
pool[i].is_free = true;
if (spimdev->ops->reset_queue)
(void)spimdev->ops->reset_queue(q);
mutex_unlock(&smdev->lock);
return 0;
}
}
i++;
}
mutex_unlock(&smdev->lock);
return -EINVAL;
}
@@ -744,6 +769,11 @@ static int _vfio_mdevs_release(struct device *dev, void *data)
static void vfio_spimdev_mdev_release(struct mdev_device *mdev)
{
struct _spimdev *smdev;
smdev = mdev_get_drvdata(mdev);
if (!smdev)
return;
(void)class_for_each_device(spimdev_class, NULL, NULL,
_vfio_mdevs_release);
}
samples/warpdrive/wd.c
@@ -182,18 +182,16 @@ static int _mdev_get(struct wd_dev_info *wd_info)
if (strlen(wd_info->group_id) > 0)
return 0;
if (wd_info->mdev_fd == 0) {
memset(mdev_info, '\0', SYS_VAL_SIZE);
val = _get_dir_attr_str(wd_info->attr_path, SPIMDEV_MDEV_GET,
mdev_info);
if (val <= 0)
return -ENODEV;
mdev_info[val - 1] = '\0';
memcpy(wd_info->mdev_name, mdev_info, UUID_STR_SZ);
wd_info->mdev_name[UUID_STR_SZ] = '\0';
strncpy(wd_info->group_id, &mdev_info[UUID_STR_SZ + 1],
SYS_VAL_SIZE);
}
memset(mdev_info, '\0', SYS_VAL_SIZE);
val = _get_dir_attr_str(wd_info->attr_path, SPIMDEV_MDEV_GET,
mdev_info);
if (val <= 0)
return val;
mdev_info[val - 1] = '\0';
memcpy(wd_info->mdev_name, mdev_info, UUID_STR_SZ);
wd_info->mdev_name[UUID_STR_SZ] = '\0';
strncpy(wd_info->group_id, &mdev_info[UUID_STR_SZ + 1],
SYS_VAL_SIZE);
return 0;
}
@@ -254,6 +252,10 @@ static int _get_wd_alg_info(struct wd_dev_info *dinfo, struct wd_capa *capa)
}
if (capa && __capa_check(ainfo, capa) < 0)
goto no_alg_exit;
ainfo->available_instances =
_get_dir_attr_int(ainfo->algo_path, "available_instances");
if (ainfo->available_instances < 0)
goto no_alg_exit;
ainfo->type =
_get_dir_attr_int(ainfo->algo_path, "type");
__add_alg(ainfo, dinfo);
@@ -382,8 +384,7 @@ static int _find_available_res(struct wd_capa *capa)
alg = alg->next;
continue;
}
if (_mdev_get(dinfo))
return -ENODEV;
return 1;
}
}
@@ -529,10 +530,8 @@ static int _get_algo_mdev(struct wd_queue *q)
}
#if (defined(HAVE_NUMA) & HAVE_NUMA)
ret = _get_dev_numa_distance(dinfo);
if (ret < 0) {
WD_ERR("Fail to get numa info on %s\n", dinfo->name);
if (ret < 0)
return ret;
}
if (ret > q->numa_dis) {
if (!TAILQ_NEXT(dinfo, next)) {
q->numa_dis++;
@@ -549,10 +548,8 @@ static int _get_algo_mdev(struct wd_queue *q)
q->fd = ioctl(q->mdev,
VFIO_SPIMDEV_CMD_GET_Q,
(unsigned long)q->type);
if (q->fd < 0) {
WD_ERR("No available Q on %s\n", dinfo->name);
if (q->fd < 0)
continue;
}
} else {
ret = _get_mdev_group(q, dinfo);
if (ret) {
@@ -702,16 +699,21 @@ static void _put_vfio_facility(struct wd_queue *q)
}
}
#endif
/* I think we can leave these clear work to do_exit of process */
if (!__atomic_load_n(&dinfo->ref, __ATOMIC_ACQUIRE)) {
if (q->group > 0)
;
if (q->mdev > 0)
;
if (q->mdev > 0) {
dinfo->mdev_fd = 0;
close(q->mdev);
}
if (q->group > 0) {
dinfo->group_fd = 0;
close(q->group);
}
}
if (q->container > 0 &&
!__atomic_sub_fetch(&container.ref, 1, __ATOMIC_ACQUIRE))
;
!__atomic_sub_fetch(&container.ref, 1, __ATOMIC_ACQUIRE)) {
close(q->container);
container.container = 0;
}
}
int _get_queue(struct wd_queue *q)
@@ -751,19 +753,19 @@ int wd_request_queue(struct wd_queue *q)
}
ret = _get_vfio_facility(q);
if (ret) {
wd_unspinlock(&_wd_pmutex);
WD_ERR("Fail to get VFIO facility!\n");
goto out_with_mdev;
}
wd_unspinlock(&_wd_pmutex);
ret = _get_queue(q);
if (ret) {
WD_ERR("Fail to get queue!\n");
goto out_with_mdev;
}
wd_unspinlock(&_wd_pmutex);
ret = drv_open(q);
if (ret) {
WD_ERR("Driver queue init fail!\n");
wd_spinlock(&_wd_pmutex);
goto out_with_queue;
}
return ret;
@@ -771,7 +773,6 @@ int wd_request_queue(struct wd_queue *q)
out_with_queue:
_put_queue(q);
out_with_mdev:
wd_spinlock(&_wd_pmutex);
_put_algo_mdev(q);
_put_vfio_facility(q);
wd_unspinlock(&_wd_pmutex);
@@ -782,8 +783,8 @@ int wd_request_queue(struct wd_queue *q)
void wd_release_queue(struct wd_queue *q)
{
drv_close(q);
_put_queue(q);
wd_spinlock(&_wd_pmutex);
_put_queue(q);
_put_algo_mdev(q);
_put_vfio_facility(q);
wd_unspinlock(&_wd_pmutex);
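The wd.c side of the revert mainly restores eager cleanup in _put_vfio_facility(): once the per-device reference count has dropped to zero the mdev and group file descriptors are closed and their cached values reset, and the container fd is closed when the global container refcount reaches zero, instead of leaving all of this to process exit. A minimal sketch of that reference-counted close pattern follows, using the same __atomic builtins as wd.c; the names fd_res, res_get and res_put are invented for illustration and are not part of warpdrive.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative only: a shared fd whose lifetime follows a reference count. */
struct fd_res {
	int ref;
	int fd;
};

static void res_get(struct fd_res *r)
{
	__atomic_add_fetch(&r->ref, 1, __ATOMIC_ACQUIRE);
}

/*
 * Close the fd on the last put and clear the cached value, mirroring how the
 * restored _put_vfio_facility() resets mdev_fd, group_fd and the container
 * once their reference counts reach zero.
 */
static void res_put(struct fd_res *r)
{
	if (!__atomic_sub_fetch(&r->ref, 1, __ATOMIC_ACQUIRE) && r->fd > 0) {
		close(r->fd);
		r->fd = 0;
	}
}

int main(void)
{
	/* Opening the resource takes the first reference. */
	struct fd_res r = { .ref = 1, .fd = open("/dev/null", O_RDONLY) };

	res_get(&r);	/* second user */
	res_put(&r);	/* still referenced, fd stays open */
	res_put(&r);	/* last user: fd is closed and reset to 0 */
	printf("fd after last put: %d\n", r.fd);
	return 0;
}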