Commit af8734e8 authored by Kunkun Jiang, committed by Zheng Zengkai

Revert "vfio: VFIO_IOMMU_CACHE_INVALIDATE"

virt inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I61SPO
CVE: NA

--------------------------------

This reverts commit 04ba12c4.
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent b4d93036
@@ -159,36 +159,6 @@ struct vfio_regions {
 #define DIRTY_BITMAP_PAGES_MAX	((u64)INT_MAX)
 #define DIRTY_BITMAP_SIZE_MAX	DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
 #define WAITED 1
-
-struct domain_capsule {
-	struct iommu_domain *domain;
-	void *data;
-};
-
-/* iommu->lock must be held */
-static int
-vfio_iommu_lookup_dev(struct vfio_iommu *iommu,
-		      int (*fn)(struct device *dev, void *data),
-		      unsigned long arg)
-{
-	struct domain_capsule dc = {.data = &arg};
-	struct vfio_domain *d;
-	struct vfio_group *g;
-	int ret = 0;
-
-	list_for_each_entry(d, &iommu->domain_list, next) {
-		dc.domain = d->domain;
-		list_for_each_entry(g, &d->group_list, next) {
-			ret = iommu_group_for_each_dev(g->iommu_group,
-						       &dc, fn);
-			if (ret)
-				break;
-		}
-	}
-	return ret;
-}
 
 static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
@@ -3080,13 +3050,6 @@ vfio_attach_pasid_table(struct vfio_iommu *iommu, unsigned long arg)
 	mutex_unlock(&iommu->lock);
 	return ret;
 }
 
-static int vfio_cache_inv_fn(struct device *dev, void *data)
-{
-	struct domain_capsule *dc = (struct domain_capsule *)data;
-	unsigned long arg = *(unsigned long *)dc->data;
-
-	return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg);
-}
-
 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
 					   struct vfio_info_cap *caps)
@@ -3264,27 +3227,6 @@ static void vfio_iommu_dirty_log_switch(struct vfio_iommu *iommu, bool enable)
 	}
 }
 
-static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu,
-					     unsigned long arg)
-{
-	struct vfio_iommu_type1_cache_invalidate cache_inv;
-	unsigned long minsz;
-	int ret;
-
-	minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate, flags);
-
-	if (copy_from_user(&cache_inv, (void __user *)arg, minsz))
-		return -EFAULT;
-
-	if (cache_inv.argsz < minsz || cache_inv.flags)
-		return -EINVAL;
-
-	mutex_lock(&iommu->lock);
-	ret = vfio_iommu_lookup_dev(iommu, vfio_cache_inv_fn, arg + minsz);
-	mutex_unlock(&iommu->lock);
-	return ret;
-}
-
 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
 					unsigned long arg)
 {
@@ -3592,8 +3534,6 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 		return vfio_iommu_type1_unbind(iommu, arg);
 	case VFIO_IOMMU_SET_PASID_TABLE:
 		return vfio_iommu_type1_set_pasid_table(iommu, arg);
-	case VFIO_IOMMU_CACHE_INVALIDATE:
-		return vfio_iommu_type1_cache_invalidate(iommu, arg);
 	default:
 		return -ENOTTY;
 	}
...
@@ -1310,19 +1310,6 @@ struct vfio_iommu_type1_set_pasid_table {
 #define VFIO_IOMMU_SET_PASID_TABLE	_IO(VFIO_TYPE, VFIO_BASE + 18)
 
-/**
- * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19,
- *			struct vfio_iommu_type1_cache_invalidate)
- *
- * Propagate guest IOMMU cache invalidation to the host.
- */
-struct vfio_iommu_type1_cache_invalidate {
-	__u32	argsz;
-	__u32	flags;
-	struct	iommu_cache_invalidate_info info;
-};
-#define VFIO_IOMMU_CACHE_INVALIDATE	_IO(VFIO_TYPE, VFIO_BASE + 19)
-
 /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
 
 /*
...
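For context: the revert drops the VFIO_IOMMU_CACHE_INVALIDATE handler and its per-device lookup helper from drivers/vfio/vfio_iommu_type1.c, along with the matching uapi definitions in include/uapi/linux/vfio.h. The sketch below is only an illustration of how a userspace VMM would have driven the removed path against the pre-revert uapi: fill the vfio_iommu_type1_cache_invalidate header, supply the trailing iommu_cache_invalidate_info payload (which the kernel forwarded to iommu_uapi_cache_invalidate() for every device in the container), and issue the ioctl on the container fd. The container_fd parameter and the chosen cache/granularity values are assumptions for illustration, not part of this patch.

/* Illustrative sketch only; targets the uapi removed by this patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

static int invalidate_guest_iotlb(int container_fd)
{
	struct vfio_iommu_type1_cache_invalidate inv;

	memset(&inv, 0, sizeof(inv));
	inv.argsz = sizeof(inv);
	inv.flags = 0;			/* the removed handler rejected any flags */
	inv.info.argsz = sizeof(inv.info);
	inv.info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	inv.info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;	/* assumed: IOTLB invalidation */
	inv.info.granularity = IOMMU_INV_GRANU_DOMAIN;	/* assumed: whole-domain scope */

	/* The removed handler copied argsz/flags, then handed the trailing
	 * info struct to iommu_uapi_cache_invalidate() for each device. */
	return ioctl(container_fd, VFIO_IOMMU_CACHE_INVALIDATE, &inv);
}

After this revert the ioctl number is no longer handled, so such a call falls through to the default case and returns -ENOTTY.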