提交 b4d93036 编写于 作者: K Kunkun Jiang 提交者: Zheng Zengkai

Revert "vfio: VFIO_IOMMU_SET_MSI_BINDING"

virt inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I61SPO
CVE: NA

--------------------------------

This reverts commit ac16d334.
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 6a6b7f56
...@@ -3088,41 +3088,6 @@ static int vfio_cache_inv_fn(struct device *dev, void *data) ...@@ -3088,41 +3088,6 @@ static int vfio_cache_inv_fn(struct device *dev, void *data)
return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg); return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg);
} }
/*
 * Bind a guest MSI doorbell mapping (giova -> gpa, size bytes) on every
 * domain attached to this vfio_iommu, under iommu->lock.
 *
 * On any per-domain failure, bindings already established on earlier
 * domains are torn down again, and the failing error code is returned.
 * Returns 0 when all domains were bound successfully.
 */
static int
vfio_bind_msi(struct vfio_iommu *iommu,
	      dma_addr_t giova, phys_addr_t gpa, size_t size)
{
	struct vfio_domain *dom;
	int rc = 0;

	mutex_lock(&iommu->lock);
	list_for_each_entry(dom, &iommu->domain_list, next) {
		rc = iommu_bind_guest_msi(dom->domain, giova, gpa, size);
		if (rc) {
			/* Undo the bindings made on the domains before this one. */
			list_for_each_entry_continue_reverse(dom,
							     &iommu->domain_list,
							     next)
				iommu_unbind_guest_msi(dom->domain, giova);
			break;
		}
	}
	mutex_unlock(&iommu->lock);
	return rc;
}
/*
 * Remove a previously bound guest MSI doorbell mapping (keyed by giova)
 * from every domain attached to this vfio_iommu, under iommu->lock.
 */
static void
vfio_unbind_msi(struct vfio_iommu *iommu, dma_addr_t giova)
{
	struct vfio_domain *dom;

	mutex_lock(&iommu->lock);
	list_for_each_entry(dom, &iommu->domain_list, next)
		iommu_unbind_guest_msi(dom->domain, giova);
	mutex_unlock(&iommu->lock);
}
static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu, static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
struct vfio_info_cap *caps) struct vfio_info_cap *caps)
{ {
...@@ -3320,31 +3285,6 @@ static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu, ...@@ -3320,31 +3285,6 @@ static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu,
return ret; return ret;
} }
/*
 * Handler for the VFIO_IOMMU_SET_MSI_BINDING ioctl.
 *
 * Copies the user's vfio_iommu_type1_set_msi_binding argument (up to and
 * including the 'size' field) and dispatches on 'flags':
 *   - VFIO_IOMMU_UNBIND_MSI: tear down the binding for msi_binding.iova
 *   - VFIO_IOMMU_BIND_MSI:   establish giova -> gpa stage-1 MSI binding
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for a
 * short argsz or an unrecognized flags value.
 */
static int vfio_iommu_type1_set_msi_binding(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	struct vfio_iommu_type1_set_msi_binding msi_binding;
	unsigned long minsz = offsetofend(struct vfio_iommu_type1_set_msi_binding,
					  size);

	if (copy_from_user(&msi_binding, (void __user *)arg, minsz))
		return -EFAULT;

	if (msi_binding.argsz < minsz)
		return -EINVAL;

	/* The original tests exact equality, so a switch is equivalent. */
	switch (msi_binding.flags) {
	case VFIO_IOMMU_UNBIND_MSI:
		vfio_unbind_msi(iommu, msi_binding.iova);
		return 0;
	case VFIO_IOMMU_BIND_MSI:
		return vfio_bind_msi(iommu, msi_binding.iova,
				     msi_binding.gpa, msi_binding.size);
	default:
		return -EINVAL;
	}
}
static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
unsigned long arg) unsigned long arg)
{ {
...@@ -3654,8 +3594,6 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, ...@@ -3654,8 +3594,6 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return vfio_iommu_type1_set_pasid_table(iommu, arg); return vfio_iommu_type1_set_pasid_table(iommu, arg);
case VFIO_IOMMU_CACHE_INVALIDATE: case VFIO_IOMMU_CACHE_INVALIDATE:
return vfio_iommu_type1_cache_invalidate(iommu, arg); return vfio_iommu_type1_cache_invalidate(iommu, arg);
case VFIO_IOMMU_SET_MSI_BINDING:
return vfio_iommu_type1_set_msi_binding(iommu, arg);
default: default:
return -ENOTTY; return -ENOTTY;
} }
......
...@@ -1323,26 +1323,6 @@ struct vfio_iommu_type1_cache_invalidate { ...@@ -1323,26 +1323,6 @@ struct vfio_iommu_type1_cache_invalidate {
}; };
#define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 19) #define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 19)
/**
 * VFIO_IOMMU_SET_MSI_BINDING - _IOWR(VFIO_TYPE, VFIO_BASE + 20,
 *			struct vfio_iommu_type1_set_msi_binding)
 *
 * Pass a stage 1 MSI doorbell mapping to the host so that this
 * latter can build a nested stage2 mapping. Or conversely tear
 * down a previously bound stage 1 MSI binding.
 *
 * flags must be exactly one of VFIO_IOMMU_BIND_MSI or
 * VFIO_IOMMU_UNBIND_MSI; any other value is rejected with -EINVAL.
 * On UNBIND only 'iova' is consumed; 'gpa' and 'size' are ignored.
 *
 * NOTE(review): the define below uses _IO while this comment says
 * _IOWR — this appears to follow the VFIO UAPI convention of _IO
 * encoding with the struct documented in the comment; confirm
 * against other VFIO ioctl definitions in this header.
 */
struct vfio_iommu_type1_set_msi_binding {
	__u32   argsz;  /* total size of the structure the caller passed */
	__u32   flags;  /* one of the BIND/UNBIND flags below */
#define VFIO_IOMMU_BIND_MSI	(1 << 0)
#define VFIO_IOMMU_UNBIND_MSI	(1 << 1)
	__u64	iova;	/* MSI guest IOVA */
	/* Fields below are used on BIND */
	__u64	gpa;	/* MSI guest physical address */
	__u64	size;	/* size of stage1 mapping (bytes) */
};
#define VFIO_IOMMU_SET_MSI_BINDING      _IO(VFIO_TYPE, VFIO_BASE + 20)
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */ /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/* /*
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册