Commit dda01f78 authored by Alex Williamson

vfio: Simplify capability helper

The vfio_info_add_capability() helper requires the caller to pass a
capability ID, which it then uses to fill in header fields, assuming
hard coded versions.  This makes for an awkward and rigid interface.
The only thing we want this helper to do is allocate sufficient
space in the caps buffer and chain this capability into the list.
Reduce it to that simple task.
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Parent 2170dd04
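For context, a minimal caller-side sketch of the simplified interface described above. This is illustrative only, not taken verbatim from the patch: "region" is a hypothetical descriptor carrying the type/subtype values, and "caps" is assumed to be an already-initialized struct vfio_info_cap, as in the ioctl paths changed below.

	/*
	 * Caller-side sketch of the simplified helper: the caller now fills
	 * in the capability header itself (id and version) and passes the
	 * header pointer together with the full capability size; the helper
	 * only allocates space in the caps buffer and chains the capability.
	 */
	struct vfio_region_info_cap_type cap_type = {
		.header.id = VFIO_REGION_INFO_CAP_TYPE,
		.header.version = 1,
	};
	int ret;

	cap_type.type = region->type;		/* hypothetical descriptor */
	cap_type.subtype = region->subtype;

	ret = vfio_info_add_capability(&caps, &cap_type.header,
				       sizeof(cap_type));
	if (ret)
		return ret;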
@@ -1012,6 +1012,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			if (!sparse)
 				return -ENOMEM;
 
+			sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+			sparse->header.version = 1;
 			sparse->nr_areas = nr_areas;
 			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
 			sparse->areas[0].offset =
@@ -1033,7 +1035,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			break;
 		default:
 		{
-			struct vfio_region_info_cap_type cap_type;
+			struct vfio_region_info_cap_type cap_type = {
+					.header.id = VFIO_REGION_INFO_CAP_TYPE,
+					.header.version = 1 };
 
 			if (info.index >= VFIO_PCI_NUM_REGIONS +
 					vgpu->vdev.num_regions)
@@ -1050,8 +1054,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			cap_type.subtype = vgpu->vdev.region[i].subtype;
 
 			ret = vfio_info_add_capability(&caps,
-					VFIO_REGION_INFO_CAP_TYPE,
-					&cap_type);
+					&cap_type.header,
+					sizeof(cap_type));
 			if (ret)
 				return ret;
 		}
@@ -1061,8 +1065,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		switch (cap_type_id) {
 		case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
 			ret = vfio_info_add_capability(&caps,
-				VFIO_REGION_INFO_CAP_SPARSE_MMAP,
-				sparse);
+				&sparse->header, sizeof(*sparse) +
+				(sparse->nr_areas *
+				sizeof(*sparse->areas)));
 			kfree(sparse);
 			if (ret)
 				return ret;
@@ -585,6 +585,8 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
 	if (!sparse)
 		return -ENOMEM;
 
+	sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+	sparse->header.version = 1;
 	sparse->nr_areas = nr_areas;
 
 	if (vdev->msix_offset & PAGE_MASK) {
@@ -600,8 +602,7 @@ static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
 		i++;
 	}
 
-	ret = vfio_info_add_capability(caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
-				       sparse);
+	ret = vfio_info_add_capability(caps, &sparse->header, size);
 	kfree(sparse);
 
 	return ret;
@@ -744,7 +745,9 @@ static long vfio_pci_ioctl(void *device_data,
 			break;
 		default:
 		{
-			struct vfio_region_info_cap_type cap_type;
+			struct vfio_region_info_cap_type cap_type = {
+					.header.id = VFIO_REGION_INFO_CAP_TYPE,
+					.header.version = 1 };
 
 			if (info.index >=
 			    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
@@ -759,9 +762,8 @@ static long vfio_pci_ioctl(void *device_data,
 			cap_type.type = vdev->region[i].type;
 			cap_type.subtype = vdev->region[i].subtype;
 
-			ret = vfio_info_add_capability(&caps,
-						       VFIO_REGION_INFO_CAP_TYPE,
-						       &cap_type);
+			ret = vfio_info_add_capability(&caps, &cap_type.header,
+						       sizeof(cap_type));
 			if (ret)
 				return ret;
@@ -1857,63 +1857,19 @@ void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
 }
 EXPORT_SYMBOL(vfio_info_cap_shift);
 
-static int sparse_mmap_cap(struct vfio_info_cap *caps, void *cap_type)
+int vfio_info_add_capability(struct vfio_info_cap *caps,
+			     struct vfio_info_cap_header *cap, size_t size)
 {
 	struct vfio_info_cap_header *header;
-	struct vfio_region_info_cap_sparse_mmap *sparse_cap, *sparse = cap_type;
-	size_t size;
-
-	size = sizeof(*sparse) + sparse->nr_areas * sizeof(*sparse->areas);
-	header = vfio_info_cap_add(caps, size,
-				   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
 
+	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
 	if (IS_ERR(header))
 		return PTR_ERR(header);
 
-	sparse_cap = container_of(header,
-			struct vfio_region_info_cap_sparse_mmap, header);
-	sparse_cap->nr_areas = sparse->nr_areas;
-	memcpy(sparse_cap->areas, sparse->areas,
-	       sparse->nr_areas * sizeof(*sparse->areas));
-	return 0;
-}
-
-static int region_type_cap(struct vfio_info_cap *caps, void *cap_type)
-{
-	struct vfio_info_cap_header *header;
-	struct vfio_region_info_cap_type *type_cap, *cap = cap_type;
+	memcpy(header + 1, cap + 1, size - sizeof(*header));
 
-	header = vfio_info_cap_add(caps, sizeof(*cap),
-				   VFIO_REGION_INFO_CAP_TYPE, 1);
-	if (IS_ERR(header))
-		return PTR_ERR(header);
-
-	type_cap = container_of(header, struct vfio_region_info_cap_type,
-				header);
-	type_cap->type = cap->type;
-	type_cap->subtype = cap->subtype;
-	return 0;
-}
-
-int vfio_info_add_capability(struct vfio_info_cap *caps, int cap_type_id,
-			     void *cap_type)
-{
-	int ret = -EINVAL;
-
-	if (!cap_type)
-		return 0;
-
-	switch (cap_type_id) {
-	case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
-		ret = sparse_mmap_cap(caps, cap_type);
-		break;
-
-	case VFIO_REGION_INFO_CAP_TYPE:
-		ret = region_type_cap(caps, cap_type);
-		break;
-	}
-
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(vfio_info_add_capability);
 
 int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
@@ -145,7 +145,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add(
 extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);
 extern int vfio_info_add_capability(struct vfio_info_cap *caps,
-				    int cap_type_id, void *cap_type);
+				    struct vfio_info_cap_header *cap,
+				    size_t size);
 
 extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
 					       int num_irqs, int max_irq_type,