Unverified commit 306b4220, authored by openeuler-ci-bot, committed via Gitee

!326 vdpa: Add the vdpa device management mechanism and optimize the iotlb

Merge Pull Request from: @Zhao_Py 
 
Sync from https://gitee.com/openeuler/kernel/pulls/309

#I5WXCZ

[Description]
1. Add vdpa tool support in vp_vdpa.
2. Support multiple address spaces in vdpa.
3. Support the ASID-based IOTLB API in vhost-vdpa.
4. Add suspend support to vhost-vdpa.

[Testing]
1. Environmental preparation
Server hardware: TaiShan 200
Network card: SmartNIC
Operating system: openEuler 22.03 LTS
2. Operation procedure
Apply the relevant patches to the kernel and reinstall it.
Enable the SmartNIC VF.
Load the vdpa-related kernel modules:
```
        insmod drivers/vhost/vhost_iotlb.ko
        insmod drivers/vhost/vhost.ko
        insmod drivers/vdpa/vdpa.ko
        insmod drivers/vhost/vhost_vdpa.ko
        insmod drivers/vdpa/virtio_pci/vp_vdpa.ko
```
Run the following commands to bind the VF to vp-vdpa and create a vdpa device:
```
        echo -n "1af4 1000" > /sys/bus/pci/drivers/vp-vdpa/new_id
        vdpa dev add name vdpa1 mgmtdev pci/0000:01:01.1
```
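Optionally, verify the result with the iproute2 `vdpa` tool (this check is an assumption on top of the steps above; it presumes the tool is installed and uses the device names from the commands above):
```
        # list registered vdpa management devices (expects pci/0000:01:01.1)
        vdpa mgmtdev show
        # show the device created above
        vdpa dev show vdpa1
        # the character device that vhost-vdpa exposes for it
        ls /dev/vhost-vdpa-*
```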
Start QEMU with the following parameter to enable a generic vDPA device:
```
        -device vhost-vdpa-device-pci,vhostdev=/dev/vhost-vdpa-0
```
Create a bridge on the host and add the VF to the bridge.
Configure the guest network adapter and the port on the host bridge to be in the same network segment (see the sketch below).
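A minimal sketch of the bridge setup and addressing, assuming the host-side port added to the bridge is named enp1s0f1 and 192.168.100.0/24 is a free test subnet (both names are hypothetical):
```
        # on the host: create the bridge and enslave the port
        ip link add name br0 type bridge
        ip link set dev enp1s0f1 master br0
        ip link set dev br0 up
        ip addr add 192.168.100.1/24 dev br0
        # in the guest: put the virtio-net interface in the same subnet
        ip addr add 192.168.100.2/24 dev eth0
        ip link set dev eth0 up
```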
3. Expected results
The guest NIC and the port on the host bridge can ping each other.
4. Actual results
Meets expectations
 
 
Link: https://gitee.com/openeuler/kernel/pulls/326
Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com> 
What: /sys/bus/vdpa/driver_autoprobe
Date: March 2020
Contact: virtualization@lists.linux-foundation.org
Description:
This file determines whether new devices are immediately bound
to a driver after creation. It initially contains 1, which
means the kernel automatically binds devices to a compatible
driver immediately after they are created.
Writing "0" to this file disables this feature; writing any
other string enables it.
What: /sys/bus/vdpa/driver_probe
Date: March 2020
Contact: virtualization@lists.linux-foundation.org
Description:
Writing a device name to this file will cause the kernel to
attempt to bind the device to a compatible driver.
This can be useful when /sys/bus/vdpa/driver_autoprobe is
disabled.
What: /sys/bus/vdpa/drivers/.../bind
Date: March 2020
Contact: virtualization@lists.linux-foundation.org
Description:
Writing a device name to this file will cause the driver to
attempt to bind to the device. This is useful for overriding
default bindings.
What: /sys/bus/vdpa/drivers/.../unbind
Date: March 2020
Contact: virtualization@lists.linux-foundation.org
Description:
Writing a device name to this file will cause the driver to
attempt to unbind from the device. This may be useful when
overriding default bindings.
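For illustration only (not part of the patch): a shell sequence exercising the attributes documented above, assuming a vdpa device named vdpa0 and the vhost_vdpa driver (both are hypothetical placeholders):
```
        # disable automatic binding of newly created vdpa devices
        echo 0 > /sys/bus/vdpa/driver_autoprobe
        # bind/unbind vdpa0 to/from a specific driver
        echo vdpa0 > /sys/bus/vdpa/drivers/vhost_vdpa/bind
        echo vdpa0 > /sys/bus/vdpa/drivers/vhost_vdpa/unbind
        # ask the kernel to pick a compatible driver for vdpa0
        echo vdpa0 > /sys/bus/vdpa/driver_probe
        # restore automatic binding
        echo 1 > /sys/bus/vdpa/driver_autoprobe
```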
......@@ -18682,6 +18682,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-vdpa
F: Documentation/devicetree/bindings/virtio/
F: drivers/block/virtio_blk.c
F: drivers/crypto/virtio/
......
......@@ -933,7 +933,7 @@ static void virtblk_remove(struct virtio_device *vdev)
mutex_lock(&vblk->vdev_mutex);
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
......@@ -953,7 +953,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
/* Ensure we don't receive any more interrupts */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
......
......@@ -134,7 +134,7 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
......
......@@ -1967,7 +1967,7 @@ static void virtcons_remove(struct virtio_device *vdev)
flush_work(&portdev->config_work);
/* Disable interrupts for vqs */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
/* Finish up work that's lined up */
if (use_multiport(portdev))
cancel_work_sync(&portdev->control_work);
......@@ -2149,7 +2149,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
portdev = vdev->priv;
vdev->config->reset(vdev);
virtio_reset_device(vdev);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
......
......@@ -404,7 +404,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
vcrypto->vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
free_dev:
virtcrypto_devmgr_rm_dev(vcrypto);
......@@ -436,7 +436,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
virtcrypto_del_vqs(vcrypto);
......@@ -456,7 +456,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
......@@ -492,7 +492,7 @@ static int virtcrypto_restore(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
vcrypto->vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
return err;
}
......
......@@ -232,7 +232,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
vgdev->vdev->config->reset(vgdev->vdev);
virtio_reset_device(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
}
......
......@@ -1115,7 +1115,7 @@ static void viommu_remove(struct virtio_device *vdev)
iommu_device_unregister(&viommu->iommu);
/* Stop all virtqueues */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
dev_info(&vdev->dev, "device removed\n");
......
......@@ -764,7 +764,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
......
......@@ -3255,7 +3255,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
vi->vdev->config->reset(vdev);
virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
......@@ -3271,7 +3271,7 @@ static int virtnet_probe(struct virtio_device *vdev)
static void remove_vq_common(struct virtnet_info *vi)
{
vi->vdev->config->reset(vi->vdev);
virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
......
......@@ -4318,7 +4318,7 @@ static void remove_vqs(struct virtio_device *vdev)
{
int i;
vdev->config->reset(vdev);
virtio_reset_device(vdev);
for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
struct virtqueue *vq = hwsim_vqs[i];
......
......@@ -105,7 +105,7 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
nvdimm_bus_unregister(nvdimm_bus);
vdev->config->del_vqs(vdev);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
}
static struct virtio_driver virtio_pmem_driver = {
......
......@@ -1012,7 +1012,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
int ret;
vdev->config->reset(vdev);
virtio_reset_device(vdev);
ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
if (ret)
......
......@@ -780,7 +780,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
......
......@@ -167,7 +167,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
return &adapter->vf;
}
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
u64 features;
......@@ -177,7 +177,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
return features;
}
static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
......@@ -186,6 +186,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
return 0;
}
static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
return vf->req_features;
}
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
......@@ -391,8 +398,9 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
.get_features = ifcvf_vdpa_get_features,
.set_features = ifcvf_vdpa_set_features,
.get_device_features = ifcvf_vdpa_get_device_features,
.set_driver_features = ifcvf_vdpa_set_driver_features,
.get_driver_features = ifcvf_vdpa_get_driver_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
......@@ -457,7 +465,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
dev, &ifc_vdpa_ops, NULL);
dev, &ifc_vdpa_ops, 1, 1, NULL, false);
if (adapter == NULL) {
IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
return -ENOMEM;
......
......@@ -1467,7 +1467,7 @@ static u64 mlx_to_vritio_features(u16 dev_features)
return result;
}
static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
......@@ -1550,7 +1550,7 @@ static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
return __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), val);
}
static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
......@@ -1843,7 +1843,8 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
return mvdev->generation;
}
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
......@@ -1891,6 +1892,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
return -EOPNOTSUPP;
}
static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
return mvdev->actual_features;
}
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
......@@ -1903,8 +1911,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
.get_features = mlx5_vdpa_get_features,
.set_features = mlx5_vdpa_set_features,
.get_device_features = mlx5_vdpa_get_device_features,
.set_driver_features = mlx5_vdpa_set_driver_features,
.get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
.get_device_id = mlx5_vdpa_get_device_id,
......@@ -2006,7 +2015,7 @@ void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev)
max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
NULL);
1, 1, NULL, false);
if (IS_ERR(ndev))
return ndev;
......
(This diff has been collapsed.)
......@@ -52,6 +52,17 @@ static struct vdpasim *dev_to_sim(struct device *dev)
return vdpa_to_sim(vdpa);
}
static void vdpasim_vq_notify(struct vringh *vring)
{
struct vdpasim_virtqueue *vq =
container_of(vring, struct vdpasim_virtqueue, vring);
if (!vq->cb)
return;
vq->cb(vq->private);
}
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
......@@ -63,6 +74,8 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
(uintptr_t)vq->driver_addr,
(struct vring_used *)
(uintptr_t)vq->device_addr);
vq->vring.notify = vdpasim_vq_notify;
}
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
......@@ -76,6 +89,8 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
vq->private = NULL;
vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);
vq->vring.notify = NULL;
}
static void vdpasim_do_reset(struct vdpasim *vdpasim)
......@@ -221,8 +236,8 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
else
ops = &vdpasim_net_config_ops;
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
dev_attr->name);
vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, 1,
1, dev_attr->name, false);
if (!vdpasim)
goto err_alloc;
......@@ -363,14 +378,19 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
static u64 vdpasim_get_features(struct vdpa_device *vdpa)
static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
return 0;
}
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.supported_features;
}
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
......@@ -383,6 +403,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->features;
}
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
......@@ -491,7 +518,7 @@ static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
return range;
}
static int vdpasim_set_map(struct vdpa_device *vdpa,
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
struct vhost_iotlb *iotlb)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
......@@ -518,21 +545,23 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
return ret;
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
u64 pa, u32 perm)
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
u64 iova, u64 size,
u64 pa, u32 perm, void *opaque)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int ret;
spin_lock(&vdpasim->iommu_lock);
ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
perm);
ret = vhost_iotlb_add_range_ctx(vdpasim->iommu, iova, iova + size - 1,
pa, perm, opaque);
spin_unlock(&vdpasim->iommu_lock);
return ret;
}
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
u64 iova, u64 size)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
......@@ -565,8 +594,10 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
.get_features = vdpasim_get_features,
.set_features = vdpasim_set_features,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
......@@ -594,8 +625,10 @@ static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
.get_features = vdpasim_get_features,
.set_features = vdpasim_set_features,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
......
......@@ -61,6 +61,7 @@ struct vdpasim {
u32 status;
u32 generation;
u64 features;
u32 groups;
/* spinlock to synchronize iommu table */
spinlock_t iommu_lock;
};
......
......@@ -105,7 +105,8 @@ static struct device vdpasim_blk_mgmtdev = {
.release = vdpasim_blk_mgmtdev_release,
};
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
......
......@@ -127,7 +127,8 @@ static struct device vdpasim_net_mgmtdev = {
.release = vdpasim_net_mgmtdev_release,
};
static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
......
......@@ -32,7 +32,7 @@ struct vp_vring {
struct vp_vdpa {
struct vdpa_device vdpa;
struct virtio_pci_modern_device mdev;
struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
char msix_name[VP_VDPA_NAME_SIZE];
......@@ -41,6 +41,12 @@ struct vp_vdpa {
int vectors;
};
struct vp_vdpa_mgmtdev {
struct vdpa_mgmt_dev mgtdev;
struct virtio_pci_modern_device *mdev;
struct vp_vdpa *vp_vdpa;
};
static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
return container_of(vdpa, struct vp_vdpa, vdpa);
......@@ -50,17 +56,22 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
return &vp_vdpa->mdev;
return vp_vdpa->mdev;
}
static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
{
return vp_vdpa->mdev;
}
static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_features(mdev);
}
static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
......@@ -69,6 +80,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_driver_features(mdev);
}
static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
......@@ -89,7 +107,7 @@ static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i;
......@@ -136,7 +154,7 @@ static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct pci_dev *pdev = mdev->pci_dev;
int i, ret, irq;
int queues = vp_vdpa->queues;
......@@ -191,7 +209,7 @@ static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
......@@ -205,7 +223,7 @@ static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
static int vp_vdpa_reset(struct vdpa_device *vdpa)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 s = vp_vdpa_get_status(vdpa);
vp_modern_set_status(mdev, 0);
......@@ -365,7 +383,7 @@ static void vp_vdpa_get_config(struct vdpa_device *vdpa,
void *buf, unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
u8 old, new;
u8 *p;
int i;
......@@ -385,7 +403,7 @@ static void vp_vdpa_set_config(struct vdpa_device *vdpa,
unsigned int len)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
const u8 *p = buf;
int i;
......@@ -405,7 +423,7 @@ static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
struct virtio_pci_modern_device *mdev = vp_vdpa_to_mdev(vp_vdpa);
struct vdpa_notification_area notify;
notify.addr = vp_vdpa->vring[qid].notify_pa;
......@@ -415,8 +433,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
}
static const struct vdpa_config_ops vp_vdpa_ops = {
.get_features = vp_vdpa_get_features,
.set_features = vp_vdpa_set_features,
.get_device_features = vp_vdpa_get_device_features,
.set_driver_features = vp_vdpa_set_driver_features,
.get_driver_features = vp_vdpa_get_driver_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
.reset = vp_vdpa_reset,
......@@ -446,38 +465,31 @@ static void vp_vdpa_free_irq_vectors(void *data)
pci_free_irq_vectors(data);
}
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
const struct vdpa_dev_set_config *add_config)
{
struct virtio_pci_modern_device *mdev;
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
struct virtio_pci_modern_device *mdev = vp_vdpa_mgtdev->mdev;
struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
struct vp_vdpa *vp_vdpa;
struct vp_vdpa *vp_vdpa = NULL;
int ret, i;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
dev, &vp_vdpa_ops, NULL);
dev, &vp_vdpa_ops, 1, 1, name, false);
if (IS_ERR(vp_vdpa)) {
dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
return PTR_ERR(vp_vdpa);
}
mdev = &vp_vdpa->mdev;
mdev->pci_dev = pdev;
ret = vp_modern_probe(mdev);
if (ret) {
dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
goto err;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa);
vp_vdpa_mgtdev->vp_vdpa = vp_vdpa;
vp_vdpa->vdpa.dma_dev = &pdev->dev;
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
......@@ -501,13 +513,15 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
vp_modern_map_vq_notify(mdev, i,
&vp_vdpa->vring[i].notify_pa);
if (!vp_vdpa->vring[i].notify) {
ret = -EINVAL;
dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
goto err;
}
}
vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
vp_vdpa->vdpa.mdev = &vp_vdpa_mgtdev->mgtdev;
ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
if (ret) {
dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
goto err;
......@@ -520,12 +534,104 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
static void vp_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev,
struct vdpa_device *dev)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev =
container_of(v_mdev, struct vp_vdpa_mgmtdev, mgtdev);
struct vp_vdpa *vp_vdpa = vp_vdpa_mgtdev->vp_vdpa;
_vdpa_unregister_device(&vp_vdpa->vdpa);
vp_vdpa_mgtdev->vp_vdpa = NULL;
}
static const struct vdpa_mgmtdev_ops vp_vdpa_mdev_ops = {
.dev_add = vp_vdpa_dev_add,
.dev_del = vp_vdpa_dev_del,
};
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = NULL;
struct vdpa_mgmt_dev *mgtdev;
struct device *dev = &pdev->dev;
struct virtio_pci_modern_device *mdev = NULL;
struct virtio_device_id *mdev_id = NULL;
int err;
vp_vdpa_mgtdev = kzalloc(sizeof(*vp_vdpa_mgtdev), GFP_KERNEL);
if (!vp_vdpa_mgtdev)
return -ENOMEM;
mgtdev = &vp_vdpa_mgtdev->mgtdev;
mgtdev->ops = &vp_vdpa_mdev_ops;
mgtdev->device = dev;
mdev = kzalloc(sizeof(struct virtio_pci_modern_device), GFP_KERNEL);
if (!mdev) {
err = -ENOMEM;
goto mdev_err;
}
mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL);
if (!mdev_id) {
err = -ENOMEM;
goto mdev_id_err;
}
vp_vdpa_mgtdev->mdev = mdev;
mdev->pci_dev = pdev;
err = pcim_enable_device(pdev);
if (err) {
goto probe_err;
}
err = vp_modern_probe(mdev);
if (err) {
dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
goto probe_err;
}
mdev_id->device = mdev->id.device;
mdev_id->vendor = mdev->id.vendor;
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa_mgtdev);
err = vdpa_mgmtdev_register(mgtdev);
if (err) {
dev_err(&pdev->dev, "Failed to register vdpa mgmtdev device\n");
goto register_err;
}
return 0;
register_err:
vp_modern_remove(vp_vdpa_mgtdev->mdev);
probe_err:
kfree(mdev_id);
mdev_id_err:
kfree(mdev);
mdev_err:
kfree(vp_vdpa_mgtdev);
return err;
}
static void vp_vdpa_remove(struct pci_dev *pdev)
{
struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);
struct vp_vdpa_mgmtdev *vp_vdpa_mgtdev = pci_get_drvdata(pdev);
struct virtio_pci_modern_device *mdev = NULL;
vp_modern_remove(&vp_vdpa->mdev);
vdpa_unregister_device(&vp_vdpa->vdpa);
mdev = vp_vdpa_mgtdev->mdev;
vp_modern_remove(mdev);
vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
kfree(vp_vdpa_mgtdev->mgtdev.id_table);
kfree(mdev);
kfree(vp_vdpa_mgtdev);
}
static struct pci_driver vp_vdpa_driver = {
......
......@@ -36,25 +36,42 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
/**
* vhost_iotlb_add_range - add a new range to vhost IOTLB
* vhost_iotlb_add_range_ctx - add a new range to vhost IOTLB
* @iotlb: the IOTLB
* @start: start of the IOVA range
* @last: last of IOVA range
* @addr: the address that is mapped to @start
* @perm: access permission of this range
* @opaque: the opaque pointer for the new mapping
*
* Returns an error if last is smaller than start or memory allocation
* fails
*/
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
u64 start, u64 last,
u64 addr, unsigned int perm)
int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb,
u64 start, u64 last,
u64 addr, unsigned int perm,
void *opaque)
{
struct vhost_iotlb_map *map;
if (last < start)
return -EFAULT;
/* If the range being mapped is [0, ULONG_MAX], split it into two entries
* otherwise its size would overflow u64.
*/
if (start == 0 && last == ULONG_MAX) {
u64 mid = last / 2;
int err = vhost_iotlb_add_range_ctx(iotlb, start, mid, addr,
perm, opaque);
if (err)
return err;
addr += mid + 1;
start = mid + 1;
}
if (iotlb->limit &&
iotlb->nmaps == iotlb->limit &&
iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
......@@ -71,6 +88,7 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
map->last = last;
map->addr = addr;
map->perm = perm;
map->opaque = opaque;
iotlb->nmaps++;
vhost_iotlb_itree_insert(map, &iotlb->root);
......@@ -80,6 +98,15 @@ int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
return 0;
}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range_ctx);
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
u64 start, u64 last,
u64 addr, unsigned int perm)
{
return vhost_iotlb_add_range_ctx(iotlb, start, last,
addr, perm, NULL);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
/**
......@@ -98,6 +125,23 @@ void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
}
EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
/**
* vhost_iotlb_init - initialize a vhost IOTLB
* @iotlb: the IOTLB that needs to be initialized
* @limit: maximum number of IOTLB entries
* @flags: VHOST_IOTLB_FLAG_XXX
*/
void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
unsigned int flags)
{
iotlb->root = RB_ROOT_CACHED;
iotlb->limit = limit;
iotlb->nmaps = 0;
iotlb->flags = flags;
INIT_LIST_HEAD(&iotlb->list);
}
EXPORT_SYMBOL_GPL(vhost_iotlb_init);
/**
* vhost_iotlb_alloc - add a new vhost IOTLB
* @limit: maximum number of IOTLB entries
......@@ -112,11 +156,7 @@ struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
if (!iotlb)
return NULL;
iotlb->root = RB_ROOT_CACHED;
iotlb->limit = limit;
iotlb->nmaps = 0;
iotlb->flags = flags;
INIT_LIST_HEAD(&iotlb->list);
vhost_iotlb_init(iotlb, limit, flags);
return iotlb;
}
......
(This diff has been collapsed.)
......@@ -468,7 +468,7 @@ void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
bool use_worker,
int (*msg_handler)(struct vhost_dev *dev,
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
......@@ -1090,11 +1090,14 @@ static bool umem_access_ok(u64 uaddr, u64 size, int access)
return true;
}
static int vhost_process_iotlb_msg(struct vhost_dev *dev,
static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg)
{
int ret = 0;
if (asid != 0)
return -EINVAL;
mutex_lock(&dev->mutex);
vhost_dev_lock_vqs(dev);
switch (msg->type) {
......@@ -1141,6 +1144,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct vhost_iotlb_msg msg;
size_t offset;
int type, ret;
u32 asid = 0;
ret = copy_from_iter(&type, sizeof(type), from);
if (ret != sizeof(type)) {
......@@ -1156,7 +1160,16 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
break;
case VHOST_IOTLB_MSG_V2:
offset = sizeof(__u32);
if (vhost_backend_has_feature(dev->vqs[0],
VHOST_BACKEND_F_IOTLB_ASID)) {
ret = copy_from_iter(&asid, sizeof(asid), from);
if (ret != sizeof(asid)) {
ret = -EINVAL;
goto done;
}
offset = 0;
} else
offset = sizeof(__u32);
break;
default:
ret = -EINVAL;
......@@ -1170,10 +1183,17 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
if ((msg.type == VHOST_IOTLB_UPDATE ||
msg.type == VHOST_IOTLB_INVALIDATE) &&
msg.size == 0) {
ret = -EINVAL;
goto done;
}
if (dev->msg_handler)
ret = dev->msg_handler(dev, &msg);
ret = dev->msg_handler(dev, asid, &msg);
else
ret = vhost_process_iotlb_msg(dev, &msg);
ret = vhost_process_iotlb_msg(dev, asid, &msg);
if (ret) {
ret = -EFAULT;
goto done;
......
......@@ -162,7 +162,7 @@ struct vhost_dev {
int byte_weight;
u64 kcov_handle;
bool use_worker;
int (*msg_handler)(struct vhost_dev *dev,
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg);
};
......@@ -170,7 +170,7 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
int nvqs, int iov_limit, int weight, int byte_weight,
bool use_worker,
int (*msg_handler)(struct vhost_dev *dev,
int (*msg_handler)(struct vhost_dev *dev, u32 asid,
struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
......
......@@ -203,6 +203,28 @@ static int virtio_features_ok(struct virtio_device *dev)
return 0;
}
/**
* virtio_reset_device - quiesce device for removal
* @dev: the device to reset
*
* Prevents device from sending interrupts and accessing memory.
*
* Generally used for cleanup during driver / device removal.
*
* Once this has been invoked, caller must ensure that
* virtqueue_notify / virtqueue_kick are not in progress.
*
* Note: this guarantees that vq callbacks are not in progress, however caller
* is responsible for preventing access from other contexts, such as a system
* call/workqueue/bh. Invoking virtio_break_device then flushing any such
* contexts is one way to handle that.
* */
void virtio_reset_device(struct virtio_device *dev)
{
dev->config->reset(dev);
}
EXPORT_SYMBOL_GPL(virtio_reset_device);
static int virtio_dev_probe(struct device *_d)
{
int err, i;
......@@ -362,7 +384,7 @@ int register_virtio_device(struct virtio_device *dev)
/* We always start by resetting the device, in case a previous
* driver messed it up. This also tests that code path a little. */
dev->config->reset(dev);
virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
......@@ -422,7 +444,7 @@ int virtio_device_restore(struct virtio_device *dev)
/* We always start by resetting the device, in case a previous
* driver messed it up. */
dev->config->reset(dev);
virtio_reset_device(dev);
/* Acknowledge that we've seen the device. */
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
......
......@@ -1039,7 +1039,7 @@ static void remove_common(struct virtio_balloon *vb)
return_free_pages_to_mm(vb, ULONG_MAX);
/* Now we reset the device so we can clean up the queues. */
vb->vdev->config->reset(vb->vdev);
virtio_reset_device(vb->vdev);
vb->vdev->config->del_vqs(vb->vdev);
}
......
......@@ -323,7 +323,7 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
kfree(buf);
vdev->config->del_vqs(vdev);
......
......@@ -1889,7 +1889,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
vfree(vm->sb_bitmap);
/* reset the device and cleanup the queues */
vdev->config->reset(vdev);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
kfree(vm);
......
......@@ -176,6 +176,29 @@ static void vp_reset(struct virtio_device *vdev)
vp_synchronize_vectors(vdev);
}
static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
{
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
unsigned long index;
index = vq->index;
/* activate the queue */
vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
virtqueue_get_avail_addr(vq),
virtqueue_get_used_addr(vq));
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR)
return -EBUSY;
}
return 0;
}
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
return vp_modern_config_vector(&vp_dev->mdev, vector);
......@@ -218,32 +241,19 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!vq)
return ERR_PTR(-ENOMEM);
/* activate the queue */
vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
virtqueue_get_avail_addr(vq),
virtqueue_get_used_addr(vq));
err = vp_active_vq(vq, msix_vec);
if (err)
goto err;
vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
if (!vq->priv) {
err = -ENOMEM;
goto err_map_notify;
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
}
goto err;
}
return vq;
err_assign_vector:
if (!mdev->notify_base)
pci_iounmap(mdev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
err:
vring_del_virtqueue(vq);
return ERR_PTR(err);
}
......
......@@ -3,6 +3,7 @@
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
/*
* vp_modern_map_capability - map a part of virtio pci capability
......@@ -17,11 +18,10 @@
*
* Returns the io address of for the part of the capability
*/
void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
size_t minlen,
u32 align,
u32 start, u32 size,
size_t *len, resource_size_t *pa)
static void __iomem*
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
size_t minlen, u32 align, u32 start, u32 size,
size_t *len, resource_size_t *pa)
{
struct pci_dev *dev = mdev->pci_dev;
u8 bar;
......@@ -95,7 +95,6 @@ void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, in
return p;
}
EXPORT_SYMBOL_GPL(vp_modern_map_capability);
/**
* virtio_pci_find_capability - walk capabilities to find device info.
......@@ -466,6 +465,44 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
/*
* vp_modern_get_queue_reset - get the queue reset status
* @mdev: the modern virtio-pci device
* @index: queue index
*/
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
struct virtio_pci_modern_common_cfg __iomem *cfg;
cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
vp_iowrite16(index, &cfg->cfg.queue_select);
return vp_ioread16(&cfg->queue_reset);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);
/*
* vp_modern_set_queue_reset - reset the queue
* @mdev: the modern virtio-pci device
* @index: queue index
*/
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
struct virtio_pci_modern_common_cfg __iomem *cfg;
cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;
vp_iowrite16(index, &cfg->cfg.queue_select);
vp_iowrite16(1, &cfg->queue_reset);
while (vp_ioread16(&cfg->queue_reset))
msleep(1);
while (vp_ioread16(&cfg->cfg.queue_enable))
msleep(1);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
/*
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
......@@ -612,14 +649,13 @@ EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);
*
* Returns the notification offset for a virtqueue
*/
u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
u16 index)
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
u16 index)
{
vp_iowrite16(index, &mdev->common->queue_select);
return vp_ioread16(&mdev->common->queue_notify_off);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off);
/*
* vp_modern_map_vq_notify - map notification area for a
......
......@@ -65,9 +65,8 @@ static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
ops->set_config(vdpa, offset, buf, len);
vdpa_set_config(vdpa, offset, buf, len);
}
static u32 virtio_vdpa_generation(struct virtio_device *vdev)
......@@ -92,9 +91,8 @@ static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
return ops->set_status(vdpa, status);
return vdpa_set_status(vdpa, status);
}
static void virtio_vdpa_reset(struct virtio_device *vdev)
......@@ -142,8 +140,11 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
struct vdpa_callback cb;
struct virtqueue *vq;
u64 desc_addr, driver_addr, device_addr;
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
unsigned long flags;
u32 align, num;
u32 align, max_num, min_num = 1;
bool may_reduce_num = true;
int err;
if (!name)
......@@ -161,16 +162,21 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!info)
return ERR_PTR(-ENOMEM);
num = ops->get_vq_num_max(vdpa);
if (num == 0) {
max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
}
if (ops->get_vq_num_min)
min_num = ops->get_vq_num_min(vdpa);
may_reduce_num = (max_num == min_num) ? false : true;
/* Create the vring */
align = ops->get_vq_align(vdpa);
vq = vring_create_virtqueue(index, num, align, vdev,
true, true, ctx,
vq = vring_create_virtqueue(index, max_num, align, vdev,
true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
err = -ENOMEM;
......@@ -178,7 +184,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
}
/* Setup virtqueue callback */
cb.callback = virtio_vdpa_virtqueue_cb;
cb.callback = callback ? virtio_vdpa_virtqueue_cb : NULL;
cb.private = info;
ops->set_vq_cb(vdpa, index, &cb);
ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
......@@ -194,6 +200,19 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
goto err_vq;
}
/* reset virtqueue state index */
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
struct vdpa_vq_state_packed *s = &state.packed;
s->last_avail_counter = 1;
s->last_avail_idx = 0;
s->last_used_counter = 1;
s->last_used_idx = 0;
}
err = ops->set_vq_state(vdpa, index, &state);
if (err)
goto err_vq;
ops->set_vq_ready(vdpa, index, 1);
vq->priv = info;
......@@ -228,9 +247,8 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vd_dev->lock, flags);
/* Select and deactivate the queue */
/* Select and deactivate the queue (best effort) */
ops->set_vq_ready(vdpa, index, 0);
WARN_ON(ops->get_vq_ready(vdpa, index));
vring_del_virtqueue(vq);
......@@ -289,7 +307,7 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
return ops->get_features(vdpa);
return ops->get_device_features(vdpa);
}
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
......
......@@ -894,7 +894,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
return 0;
out_vqs:
vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtio_fs_cleanup_vqs(vdev, fs);
kfree(fs->vqs);
......@@ -926,7 +926,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
list_del_init(&fs->list);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
vdev->config->reset(vdev);
virtio_reset_device(vdev);
virtio_fs_cleanup_vqs(vdev, fs);
vdev->priv = NULL;
......
......@@ -6,9 +6,11 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/vhost_iotlb.h>
#include <linux/virtio_net.h>
#include <linux/if_ether.h>
/**
* vDPA callback definition.
* struct vdpa_calllback - vDPA callback definition.
* @callback: interrupt callback function
* @private: the data passed to the callback function
*/
......@@ -18,7 +20,7 @@ struct vdpa_callback {
};
/**
* vDPA notification area
* struct vdpa_notification_area - vDPA notification area
* @addr: base address of the notification area
* @size: size of the notification area
*/
......@@ -43,29 +45,33 @@ struct vdpa_vq_state_split {
* @last_used_idx: used index
*/
struct vdpa_vq_state_packed {
u16 last_avail_counter:1;
u16 last_avail_idx:15;
u16 last_used_counter:1;
u16 last_used_idx:15;
u16 last_avail_counter:1;
u16 last_avail_idx:15;
u16 last_used_counter:1;
u16 last_used_idx:15;
};
struct vdpa_vq_state {
union {
struct vdpa_vq_state_split split;
struct vdpa_vq_state_packed packed;
};
union {
struct vdpa_vq_state_split split;
struct vdpa_vq_state_packed packed;
};
};
struct vdpa_mgmt_dev;
/**
* vDPA device - representation of a vDPA device
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
* @driver_override: driver name to force a match
* @config: the configuration ops for this device.
* @cf_lock: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
* @ngroups: the number of virtqueue groups
* @nas: the number of address spaces
* @use_va: indicate whether virtual address must be used by this device
* @nvqs: maximum number of supported virtqueues
* @mdev: management device pointer; caller must setup when registering device as part
* of dev_add() mgmtdev ops callback before invoking _vdpa_register_device().
......@@ -75,14 +81,18 @@ struct vdpa_device {
struct device *dma_dev;
const char *driver_override;
const struct vdpa_config_ops *config;
struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index;
bool features_valid;
int nvqs;
bool use_va;
u32 nvqs;
struct vdpa_mgmt_dev *mdev;
unsigned int ngroups;
unsigned int nas;
};
/**
* vDPA IOVA range - the IOVA range support by the device
* struct vdpa_iova_range - the IOVA range support by the device
* @first: start of the IOVA range
* @last: end of the IOVA range
*/
......@@ -91,8 +101,27 @@ struct vdpa_iova_range {
u64 last;
};
struct vdpa_dev_set_config {
struct {
u8 mac[ETH_ALEN];
u16 mtu;
u16 max_vq_pairs;
} net;
u64 mask;
};
/**
* vDPA_config_ops - operations for configuring a vDPA device.
* Corresponding file area for device memory mapping
* @file: vma->vm_file for the mapping
* @offset: mapping offset in the vm_file
*/
struct vdpa_map_file {
struct file *file;
u64 offset;
};
/**
* struct vdpa_config_ops - operations for configuring a vDPA device.
* Note: vDPA device drivers are required to implement all of the
* operations unless it is mentioned to be optional in the following
* list.
......@@ -133,7 +162,7 @@ struct vdpa_iova_range {
* @vdev: vdpa device
* @idx: virtqueue index
* @state: pointer to returned state (last_avail_idx)
* @get_vq_notification: Get the notification area for a virtqueue
* @get_vq_notification: Get the notification area for a virtqueue (optional)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns the notifcation area
......@@ -147,20 +176,31 @@ struct vdpa_iova_range {
* for the device
* @vdev: vdpa device
* Returns virtqueue algin requirement
* @get_features: Get virtio features supported by the device
* @get_vq_group: Get the group id for a specific
* virtqueue (optional)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns u32: group id for this virtqueue
* @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
* Returns the virtio features support by the
* device
* @set_features: Set virtio features supported by the driver
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: feature support by the driver
* Returns integer: success (0) or error (< 0)
* @get_driver_features: Get the virtio driver features in action
* @vdev: vdpa device
* Returns the virtio features accepted
* @set_config_cb: Set the config interrupt callback
* @vdev: vdpa device
* @cb: virtio-vdev interrupt callback structure
* @get_vq_num_max: Get the max size of virtqueue
* @vdev: vdpa device
* Returns u16: max size of virtqueue
* @get_vq_num_min: Get the min size of virtqueue (optional)
* @vdev: vdpa device
* Returns u16: min size of virtqueue
* @get_device_id: Get virtio device id
* @vdev: vdpa device
* Returns u32: virtio device id
......@@ -176,6 +216,9 @@ struct vdpa_iova_range {
* @reset: Reset device
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
* @suspend: Suspend or resume the device (optional)
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
* @get_config_size: Get the size of the configuration space includes
* fields that are conditional on feature bits.
* @vdev: vdpa device
......@@ -201,10 +244,17 @@ struct vdpa_iova_range {
* @vdev: vdpa device
* Returns the iova range supported by
* the device.
* @set_group_asid: Set address space identifier for a
* virtqueue group (optional)
* @vdev: vdpa device
* @group: virtqueue group
* @asid: address space id for this group
* Returns integer: success (0) or error (< 0)
* @set_map: Set device memory mapping (optional)
* Needed for device that using device
* specific DMA translation (on-chip IOMMU)
* @vdev: vdpa device
* @asid: address space identifier
* @iotlb: vhost memory mapping to be
* used by the vDPA
* Returns integer: success (0) or error (< 0)
......@@ -213,6 +263,7 @@ struct vdpa_iova_range {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental map.
* @vdev: vdpa device
* @asid: address space identifier
* @iova: iova to be mapped
* @size: size of the area
* @pa: physical address for the map
......@@ -224,6 +275,7 @@ struct vdpa_iova_range {
* specific DMA translation (on-chip IOMMU)
* and preferring incremental unmap.
* @vdev: vdpa device
* @asid: address space identifier
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
......@@ -245,6 +297,9 @@ struct vdpa_config_ops {
const struct vdpa_vq_state *state);
int (*get_vq_state)(struct vdpa_device *vdev, u16 idx,
struct vdpa_vq_state *state);
int (*get_vendor_vq_stats)(struct vdpa_device *vdev, u16 idx,
struct sk_buff *msg,
struct netlink_ext_ack *extack);
struct vdpa_notification_area
(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* vq irq is not expected to be changed once DRIVER_OK is set */
......@@ -252,16 +307,20 @@ struct vdpa_config_ops {
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
u64 (*get_features)(struct vdpa_device *vdev);
int (*set_features)(struct vdpa_device *vdev, u64 features);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
struct vdpa_callback *cb);
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
u16 (*get_vq_num_min)(struct vdpa_device *vdev);
u32 (*get_device_id)(struct vdpa_device *vdev);
u32 (*get_vendor_id)(struct vdpa_device *vdev);
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
int (*reset)(struct vdpa_device *vdev);
int (*suspend)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
......@@ -271,10 +330,14 @@ struct vdpa_config_ops {
struct vdpa_iova_range (*get_iova_range)(struct vdpa_device *vdev);
/* DMA ops */
int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb);
int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size,
u64 pa, u32 perm);
int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size);
int (*set_map)(struct vdpa_device *vdev, unsigned int asid,
struct vhost_iotlb *iotlb);
int (*dma_map)(struct vdpa_device *vdev, unsigned int asid,
u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
u64 iova, u64 size);
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
unsigned int asid);
/* Free device resources */
void (*free)(struct vdpa_device *vdev);
......@@ -282,24 +345,41 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
size_t size, const char *name);
unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va);
#define vdpa_alloc_device(dev_struct, member, parent, config, name) \
container_of(__vdpa_alloc_device( \
parent, config, \
sizeof(dev_struct) + \
/**
* vdpa_alloc_device - allocate and initilaize a vDPA device
*
* @dev_struct: the type of the parent structure
* @member: the name of struct vdpa_device within the @dev_struct
* @parent: the parent device
* @config: the bus operations that is supported by this device
* @ngroups: the number of virtqueue groups supported by this device
* @nas: the number of address spaces
* @name: name of the vdpa device
* @use_va: indicate whether virtual address must be used by this device
*
* Return allocated data structure or ERR_PTR upon error
*/
#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
name, use_va) \
container_of((__vdpa_alloc_device( \
parent, config, ngroups, nas, \
(sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member)), name), \
dev_struct, member))), name, use_va)), \
dev_struct, member)
int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);
int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
/**
* vdpa_driver - operations for a vDPA driver
* struct vdpa_driver - operations for a vDPA driver
* @driver: underlying device driver
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function to call when a device is removed.
......@@ -346,59 +426,82 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
static inline int vdpa_reset(struct vdpa_device *vdev)
{
const struct vdpa_config_ops *ops = vdev->config;
const struct vdpa_config_ops *ops = vdev->config;
int ret;
down_write(&vdev->cf_lock);
vdev->features_valid = false;
return ops->reset(vdev);
ret = ops->reset(vdev);
up_write(&vdev->cf_lock);
return ret;
}
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features)
{
const struct vdpa_config_ops *ops = vdev->config;
const struct vdpa_config_ops *ops = vdev->config;
int ret;
vdev->features_valid = true;
return ops->set_features(vdev, features);
}
ret = ops->set_driver_features(vdev, features);
return ret;
}
static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
void *buf, unsigned int len)
static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
{
const struct vdpa_config_ops *ops = vdev->config;
/*
* Config accesses aren't supposed to trigger before features are set.
* If it does happen we assume a legacy guest.
*/
if (!vdev->features_valid)
vdpa_set_features(vdev, 0);
ops->get_config(vdev, offset, buf, len);
int ret;
down_write(&vdev->cf_lock);
ret = vdpa_set_features_unlocked(vdev, features);
up_write(&vdev->cf_lock);
return ret;
}
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
const void *buf, unsigned int length);
void vdpa_set_status(struct vdpa_device *vdev, u8 status);
/**
* vdpa_mgmtdev_ops - vdpa device ops
* @dev_add: Add a vdpa device using alloc and register
* @mdev: parent device to use for device addition
* @name: name of the new vdpa device
* Driver need to add a new device using _vdpa_register_device()
* after fully initializing the vdpa device. Driver must return 0
* on success or appropriate error code.
* @dev_del: Remove a vdpa device using unregister
* @mdev: parent device to use for device removal
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
* struct vdpa_mgmtdev_ops - vdpa device ops
* @dev_add: Add a vdpa device using alloc and register
* @mdev: parent device to use for device addition
* @name: name of the new vdpa device
* @config: config attributes to apply to the device under creation
* Driver need to add a new device using _vdpa_register_device()
* after fully initializing the vdpa device. Driver must return 0
* on success or appropriate error code.
* @dev_del: Remove a vdpa device using unregister
* @mdev: parent device to use for device removal
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
};
/**
* struct vdpa_mgmt_dev - vdpa management device
* @device: Management parent device
* @ops: operations supported by management device
* @id_table: Pointer to device id table of supported ids
* @config_attr_mask: bit mask of attributes of type enum vdpa_attr that the
* management device supports during the dev_add callback
* @list: list entry
* @supported_features: features supported by the management device
* @max_supported_vqs: maximum number of virtqueues supported by the management device
*/
struct vdpa_mgmt_dev {
struct device *device;
const struct vdpa_mgmtdev_ops *ops;
struct virtio_device_id *id_table;
u64 config_attr_mask;
struct list_head list;
u64 supported_features;
u32 max_supported_vqs;
};
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
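For completeness, a hedged sketch of how a parent driver might fill in struct vdpa_mgmt_dev and register it. The id table, `my_parent_probe()` and the reuse of `my_vdpa_mgmtdev_ops` from the previous sketch are illustrative assumptions; the config_attr_mask shown simply advertises that a MAC address and MTU may be supplied at device-add time.
```
#include <linux/bits.h>
#include <linux/vdpa.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <uapi/linux/vdpa.h>

/* Hypothetical sketch: advertise a vdpa-net parent device. */
static struct virtio_device_id my_vdpa_id_table[] = {
	{ .device = VIRTIO_ID_NET, .vendor = VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct vdpa_mgmt_dev my_mgmt_dev = {
	.ops = &my_vdpa_mgmtdev_ops,	/* ops table from the sketch above */
	.id_table = my_vdpa_id_table,
	/* attributes userspace may pass via "vdpa dev add ... mac/mtu" */
	.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
			    BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU),
};

static int my_parent_probe(struct device *parent)
{
	my_mgmt_dev.device = parent;
	return vdpa_mgmtdev_register(&my_mgmt_dev);
}
```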
......
......@@ -17,6 +17,7 @@ struct vhost_iotlb_map {
u32 perm;
u32 flags_padding;
u64 __subtree_last;
void *opaque;
};
#define VHOST_IOTLB_FLAG_RETIRE 0x1
......@@ -29,10 +30,14 @@ struct vhost_iotlb {
unsigned int flags;
};
int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last,
u64 addr, unsigned int perm, void *opaque);
int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last,
u64 addr, unsigned int perm);
void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last);
void vhost_iotlb_init(struct vhost_iotlb *iotlb, unsigned int limit,
unsigned int flags);
struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags);
void vhost_iotlb_free(struct vhost_iotlb *iotlb);
void vhost_iotlb_reset(struct vhost_iotlb *iotlb);
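The new vhost_iotlb_add_range_ctx() variant lets the owner tag each mapping with an opaque cookie, which vhost-vdpa uses to remember what backs a translation. Below is a small, hedged sketch of the allocate/populate/tear-down lifecycle; the addresses, the entry limit and the caller-defined cookie are made up for illustration.
```
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/vhost_iotlb.h>
#include <linux/vhost_types.h>	/* VHOST_ACCESS_RW */

/* Illustrative only: build a one-entry IOTLB, then tear it down. */
static int demo_build_iotlb(u64 iova, u64 size, u64 hva, void *cookie)
{
	struct vhost_iotlb *iotlb;
	int ret;

	iotlb = vhost_iotlb_alloc(2048 /* max entries */, 0 /* flags */);
	if (!iotlb)
		return -ENOMEM;

	/* Map [iova, iova + size - 1] -> hva, read-write, tagged with cookie. */
	ret = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1, hva,
					VHOST_ACCESS_RW, cookie);
	if (ret)
		goto out_free;

	/* ... the tree would normally be handed to the translation path ... */

	vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
out_free:
	vhost_iotlb_free(iotlb);	/* drops any entries still present */
	return ret;
}
```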
......
......@@ -139,6 +139,7 @@ void virtio_config_enable(struct virtio_device *dev);
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
size_t virtio_max_dma_size(struct virtio_device *vdev);
......
......@@ -5,6 +5,13 @@
#include <linux/pci.h>
#include <linux/virtio_pci.h>
struct virtio_pci_modern_common_cfg {
struct virtio_pci_common_cfg cfg;
__le16 queue_notify_data; /* read-write */
__le16 queue_reset; /* read-write */
};
struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
......@@ -102,15 +109,10 @@ void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
u16 idx);
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
u16 idx);
void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
size_t minlen,
u32 align,
u32 start, u32 size,
size_t *len, resource_size_t *pa);
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
u16 index, resource_size_t *pa);
int vp_modern_probe(struct virtio_pci_modern_device *mdev);
void vp_modern_remove(struct virtio_pci_modern_device *mdev);
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
#endif
......@@ -17,11 +17,16 @@ enum vdpa_command {
VDPA_CMD_DEV_NEW,
VDPA_CMD_DEV_DEL,
VDPA_CMD_DEV_GET, /* can dump */
VDPA_CMD_DEV_CONFIG_GET, /* can dump */
VDPA_CMD_DEV_VSTATS_GET,
};
enum vdpa_attr {
VDPA_ATTR_UNSPEC,
/* Pad attribute for 64b alignment */
VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
/* bus name (optional) + dev name together make the parent device handle */
VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */
VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */
......@@ -32,6 +37,20 @@ enum vdpa_attr {
VDPA_ATTR_DEV_VENDOR_ID, /* u32 */
VDPA_ATTR_DEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_MAX_VQ_SIZE, /* u16 */
VDPA_ATTR_DEV_MIN_VQ_SIZE, /* u16 */
VDPA_ATTR_DEV_NET_CFG_MACADDR, /* binary */
VDPA_ATTR_DEV_NET_STATUS, /* u8 */
VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
VDPA_ATTR_DEV_QUEUE_INDEX, /* u32 */
VDPA_ATTR_DEV_VENDOR_ATTR_NAME, /* string */
VDPA_ATTR_DEV_VENDOR_ATTR_VALUE, /* u64 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
......
......@@ -89,11 +89,6 @@
/* Set or get vhost backend capability */
/* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
/* IOTLB can accept batching hints */
#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
......@@ -150,11 +145,39 @@
/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
struct vhost_vdpa_iova_range)
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
/* Get the count of all virtqueues */
#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
/* Get the number of virtqueue groups. */
#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
/* Get the number of address spaces. */
#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
/* Get the group for a virtqueue: read index, write group in num.
* The virtqueue index is stored in the index field of
* vhost_vring_state. The group for this specific virtqueue is
* returned via the num field of vhost_vring_state.
*/
#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
struct vhost_vring_state)
/* Set the ASID for a virtqueue group. The group index is stored in
* the index field of vhost_vring_state; the ASID associated with this
* group is stored in the num field of vhost_vring_state.
*/
#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
struct vhost_vring_state)
/* Suspend a device so it does not process virtqueue requests anymore
*
* After the ioctl returns, the device must preserve all the necessary state
* (the virtqueue vring base plus any device-specific state) that is
* required for restoring it in the future. The device must not change its
* configuration after that point.
*/
#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
#endif
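Taken together, the new ioctls let a userspace backend discover the virtqueue-group/address-space topology, bind a group to an ASID, and quiesce the device. The following is a hedged userspace sketch (not part of the patch set); it assumes the device node created in the test steps, /dev/vhost-vdpa-0, and omits most error handling.
```
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
	struct vhost_vring_state state = { .index = 0 };
	unsigned int groups = 0, as_num = 0;
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0)
		return 1;

	ioctl(fd, VHOST_SET_OWNER, NULL);		/* take ownership first */
	ioctl(fd, VHOST_VDPA_GET_GROUP_NUM, &groups);
	ioctl(fd, VHOST_VDPA_GET_AS_NUM, &as_num);
	printf("%u virtqueue group(s), %u address space(s)\n", groups, as_num);

	/* Which group does virtqueue 0 belong to? The answer arrives in .num. */
	ioctl(fd, VHOST_VDPA_GET_VRING_GROUP, &state);

	/* Bind that group to ASID 1 (e.g. to isolate a control virtqueue). */
	state.index = state.num;	/* group index */
	state.num = 1;			/* ASID */
	ioctl(fd, VHOST_VDPA_SET_GROUP_ASID, &state);

	/* Stop the datapath; the device must keep its state for a later
	 * restore (only meaningful when VHOST_BACKEND_F_SUSPEND is offered).
	 */
	ioctl(fd, VHOST_VDPA_SUSPEND);

	close(fd);
	return 0;
}
```
Binding a group to a second ASID can only succeed when the parent device exposes more than one address space, which is exactly what the multiple-address-space patches in this series enable.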
......@@ -87,7 +87,7 @@ struct vhost_msg {
struct vhost_msg_v2 {
__u32 type;
__u32 asid;
union {
struct vhost_iotlb_msg iotlb;
__u8 padding[64];
......@@ -153,4 +153,15 @@ struct vhost_vdpa_iova_range {
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
/* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
/* IOTLB can accept batching hints */
#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
/* IOTLB can accept address space identifier through V2 type of IOTLB
* message
*/
#define VHOST_BACKEND_F_IOTLB_ASID 0x3
/* Device can be suspended */
#define VHOST_BACKEND_F_SUSPEND 0x4
#endif
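Once VHOST_BACKEND_F_IOTLB_ASID has been negotiated, the asid field added to struct vhost_msg_v2 above selects which address space an IOTLB update targets. Below is a hedged userspace sketch of negotiating those bits and pushing one mapping into ASID 1; the IOVA, size and user address are illustrative (a real backend would pass a live mmap()/malloc() region, otherwise the update fails), and error handling is abbreviated.
```
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/vhost_types.h>

/* Illustrative only: map a made-up IOVA range into address space `asid`. */
static int map_into_asid(int fd, unsigned int asid, unsigned long long iova,
			 unsigned long long uaddr, unsigned long long size)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.asid = asid,
		.iotlb = {
			.iova = iova,
			.size = size,
			.uaddr = uaddr,
			.perm = VHOST_ACCESS_RW,
			.type = VHOST_IOTLB_UPDATE,
		},
	};

	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}

int main(void)
{
	unsigned long long features = 0;
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0)
		return 1;

	ioctl(fd, VHOST_SET_OWNER, NULL);
	ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features);
	/* Keep only the bits this sketch relies on. */
	features &= (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
		    (1ULL << VHOST_BACKEND_F_IOTLB_ASID);
	ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);

	return map_into_asid(fd, 1, 0x100000ULL, 0x7f0000000000ULL, 0x10000ULL);
}
```
A real backend would also bind the relevant virtqueue group to that ASID with VHOST_VDPA_SET_GROUP_ASID, as in the earlier sketch.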
......@@ -202,6 +202,8 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_AVAILHI 44
#define VIRTIO_PCI_COMMON_Q_USEDLO 48
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
#define VIRTIO_PCI_COMMON_Q_NDATA 56
#define VIRTIO_PCI_COMMON_Q_RESET 58
#endif /* VIRTIO_PCI_NO_MODERN */
......
......@@ -716,7 +716,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
mutex_unlock(&virtio_9p_lock);
virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
......
......@@ -641,7 +641,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
virtio_vsock_reset_sock);
/* Stop all work handlers to make sure no one is accessing the device,
* so we can safely call virtio_reset_device().
*/
mutex_lock(&vsock->rx_lock);
vsock->rx_run = false;
......@@ -658,7 +658,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
/* Flush all device writes and interrupts, device will not use any
* more buffers.
*/
virtio_reset_device(vdev);
mutex_lock(&vsock->rx_lock);
while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
......
......@@ -89,11 +89,6 @@
/* Set or get vhost backend capability */
/* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
/* IOTLB can accept batching hints */
#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
......@@ -150,4 +145,39 @@
/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
struct vhost_vdpa_iova_range)
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
/* Get the count of all virtqueues */
#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
/* Get the number of virtqueue groups. */
#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
/* Get the number of address spaces. */
#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
/* Get the group for a virtqueue: read index, write group in num.
* The virtqueue index is stored in the index field of
* vhost_vring_state. The group for this specific virtqueue is
* returned via the num field of vhost_vring_state.
*/
#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
struct vhost_vring_state)
/* Set the ASID for a virtqueue group. The group index is stored in
* the index field of vhost_vring_state; the ASID associated with this
* group is stored in the num field of vhost_vring_state.
*/
#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
struct vhost_vring_state)
/* Suspend a device so it does not process virtqueue requests anymore
*
* After the ioctl returns, the device must preserve all the necessary state
* (the virtqueue vring base plus any device-specific state) that is
* required for restoring it in the future. The device must not change its
* configuration after that point.
*/
#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
#endif