Commit 04ba12c4 authored by Liu, Yi L, committed by Zheng Zengkai

vfio: VFIO_IOMMU_CACHE_INVALIDATE

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I401IF
CVE: NA

------------------------------

When the guest "owns" the stage 1 translation structures, the host
IOMMU driver has no knowledge of caching structure updates unless
the guest invalidation requests are trapped and passed down to the
host.

This patch adds the VFIO_IOMMU_CACHE_INVALIDATE ioctl, which
propagates guest stage 1 IOMMU cache invalidations to the host.
Signed-off-by: Liu, Yi L <yi.l.liu@linux.intel.com>
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 4b042357
@@ -159,6 +159,36 @@ struct vfio_regions {
#define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
#define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
#define WAITED 1
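
/*
 * A domain_capsule couples an iommu_domain with the opaque argument
 * destined for a per-device callback, so both can be passed through
 * iommu_group_for_each_dev() as a single cookie.
 */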
struct domain_capsule {
	struct iommu_domain *domain;
	void *data;
};

/* iommu->lock must be held */
static int
vfio_iommu_lookup_dev(struct vfio_iommu *iommu,
		      int (*fn)(struct device *dev, void *data),
		      unsigned long arg)
{
	struct domain_capsule dc = {.data = &arg};
	struct vfio_domain *d;
	struct vfio_group *g;
	int ret = 0;

	list_for_each_entry(d, &iommu->domain_list, next) {
		dc.domain = d->domain;
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_group_for_each_dev(g->iommu_group,
						       &dc, fn);
			if (ret)
				break;
		}
	}
	return ret;
}

static int put_pfn(unsigned long pfn, int prot);
static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
@@ -3050,6 +3080,13 @@ vfio_attach_pasid_table(struct vfio_iommu *iommu, unsigned long arg)
	mutex_unlock(&iommu->lock);
	return ret;
}
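
/*
 * Per-device callback: dc->data holds the user pointer to the cache
 * invalidation request; the IOMMU UAPI layer copies in and
 * sanity-checks the iommu_cache_invalidate_info itself.
 */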
static int vfio_cache_inv_fn(struct device *dev, void *data)
{
	struct domain_capsule *dc = (struct domain_capsule *)data;
	unsigned long arg = *(unsigned long *)dc->data;

	return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg);
}

static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
@@ -3227,6 +3264,27 @@ static void vfio_iommu_dirty_log_switch(struct vfio_iommu *iommu, bool enable)
	}
}
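
/*
 * VFIO_IOMMU_CACHE_INVALIDATE handler: only the vfio wrapper
 * (argsz/flags) is validated here; the user pointer is then advanced
 * past the wrapper so each device's callback reads the embedded
 * iommu_cache_invalidate_info directly from user memory.
 */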
static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu,
					     unsigned long arg)
{
	struct vfio_iommu_type1_cache_invalidate cache_inv;
	unsigned long minsz;
	int ret;

	minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate, flags);

	if (copy_from_user(&cache_inv, (void __user *)arg, minsz))
		return -EFAULT;

	if (cache_inv.argsz < minsz || cache_inv.flags)
		return -EINVAL;

	mutex_lock(&iommu->lock);
	ret = vfio_iommu_lookup_dev(iommu, vfio_cache_inv_fn, arg + minsz);
	mutex_unlock(&iommu->lock);
	return ret;
}

static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
@@ -3534,6 +3592,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
		return vfio_iommu_type1_unbind(iommu, arg);
	case VFIO_IOMMU_SET_PASID_TABLE:
		return vfio_iommu_type1_set_pasid_table(iommu, arg);
	case VFIO_IOMMU_CACHE_INVALIDATE:
		return vfio_iommu_type1_cache_invalidate(iommu, arg);
	default:
		return -ENOTTY;
	}

@@ -1310,6 +1310,19 @@ struct vfio_iommu_type1_set_pasid_table {
#define VFIO_IOMMU_SET_PASID_TABLE _IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19,
 *			struct vfio_iommu_type1_cache_invalidate)
 *
 * Propagate guest IOMMU cache invalidation to the host.
 */
struct vfio_iommu_type1_cache_invalidate {
	__u32	argsz;
	__u32	flags;
	struct iommu_cache_invalidate_info info;
};

#define VFIO_IOMMU_CACHE_INVALIDATE	_IO(VFIO_TYPE, VFIO_BASE + 19)

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
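
For reference, a minimal userspace sketch of driving the new ioctl might look
like the following (not part of the patch). The container fd setup, the
helper name forward_guest_tlb_inv(), and the domain-wide IOTLB flush
parameters are illustrative assumptions; the iommu_cache_invalidate_info
layout and constants are taken from the uapi <linux/iommu.h> of a kernel
carrying this series.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommu.h>	/* iommu_cache_invalidate_info */
#include <linux/vfio.h>		/* VFIO_IOMMU_CACHE_INVALIDATE */

/*
 * Hypothetical helper: forward one guest IOTLB invalidation to the
 * host. 'container' is an already-configured /dev/vfio/vfio fd whose
 * groups use the nesting-enabled type1 backend; the field values
 * below (domain-wide IOTLB flush) are illustrative only.
 */
static int forward_guest_tlb_inv(int container)
{
	struct vfio_iommu_type1_cache_invalidate inv;

	memset(&inv, 0, sizeof(inv));
	inv.argsz = sizeof(inv);
	inv.flags = 0;			/* the handler rejects any set flag */
	inv.info.argsz = sizeof(inv.info);
	inv.info.version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	inv.info.cache = IOMMU_CACHE_INV_TYPE_IOTLB;
	inv.info.granularity = IOMMU_INV_GRANU_DOMAIN;

	return ioctl(container, VFIO_IOMMU_CACHE_INVALIDATE, &inv);
}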