Commit 189522da, authored by: Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio fixes and cleanups from Michael Tsirkin:

 - Some bug fixes

 - Cleanup a couple of issues that surfaced meanwhile

 - Disable vhost on ARM with OABI for now - to be fixed fully later in
   the cycle or in the next release.

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (24 commits)
  vhost: disable for OABI
  virtio: drop vringh.h dependency
  virtio_blk: add a missing include
  virtio-balloon: Avoid using the word 'report' when referring to free page hinting
  virtio-balloon: make virtballoon_free_page_report() static
  vdpa: fix comment of vdpa_register_device()
  vdpa: make vhost, virtio depend on menu
  vdpa: allow a 32 bit vq alignment
  drm/virtio: fix up for include file changes
  remoteproc: pull in slab.h
  rpmsg: pull in slab.h
  virtio_input: pull in slab.h
  remoteproc: pull in slab.h
  virtio-rng: pull in slab.h
  virtgpu: pull in uaccess.h
  tools/virtio: make asm/barrier.h self contained
  tools/virtio: define aligned attribute
  virtio/test: fix up after IOTLB changes
  vhost: Create accessors for virtqueues private_data
  vdpasim: Return status in vdpasim_get_status
  ...
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/blk-mq.h> #include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h> #include <linux/blk-mq-virtio.h>
#include <linux/numa.h> #include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>
#define PART_BITS 4 #define PART_BITS 4
#define VQ_NAME_LEN 16 #define VQ_NAME_LEN 16
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/virtio.h> #include <linux/virtio.h>
#include <linux/virtio_rng.h> #include <linux/virtio_rng.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h>
static DEFINE_IDA(rng_index_ida); static DEFINE_IDA(rng_index_ida);
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/file.h> #include <linux/file.h>
#include <linux/sync_file.h> #include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
#include <drm/virtgpu_drm.h> #include <drm/virtgpu_drm.h>
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/virtio.h> #include <linux/virtio.h>
#include <linux/virtio_config.h> #include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <drm/drm_file.h> #include <drm/drm_file.h>
......
...@@ -116,7 +116,7 @@ config MIC_COSM ...@@ -116,7 +116,7 @@ config MIC_COSM
config VOP config VOP
tristate "VOP Driver" tristate "VOP Driver"
depends on VOP_BUS depends on VOP_BUS && VHOST_DPN
select VHOST_RING select VHOST_RING
select VIRTIO select VIRTIO
help help
......
...@@ -50,7 +50,7 @@ config CAIF_HSI ...@@ -50,7 +50,7 @@ config CAIF_HSI
config CAIF_VIRTIO config CAIF_VIRTIO
tristate "CAIF virtio transport driver" tristate "CAIF virtio transport driver"
depends on CAIF && HAS_DMA depends on CAIF && HAS_DMA && VHOST_DPN
select VHOST_RING select VHOST_RING
select VIRTIO select VIRTIO
select GENERIC_ALLOCATOR select GENERIC_ALLOCATOR
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
*/ */
#include <linux/remoteproc.h> #include <linux/remoteproc.h>
#include <linux/slab.h>
#include "remoteproc_internal.h" #include "remoteproc_internal.h"
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/regmap.h> #include <linux/regmap.h>
#include <linux/remoteproc.h> #include <linux/remoteproc.h>
#include <linux/reset.h> #include <linux/reset.h>
#include <linux/slab.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include "remoteproc_internal.h" #include "remoteproc_internal.h"
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/remoteproc.h> #include <linux/remoteproc.h>
#include <linux/rpmsg/mtk_rpmsg.h> #include <linux/rpmsg/mtk_rpmsg.h>
#include <linux/slab.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include "rpmsg_internal.h" #include "rpmsg_internal.h"
......
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
config VDPA menuconfig VDPA
tristate tristate "vDPA drivers"
help help
Enable this module to support vDPA device that uses a Enable this module to support vDPA device that uses a
datapath which complies with virtio specifications with datapath which complies with virtio specifications with
vendor specific control path. vendor specific control path.
menuconfig VDPA_MENU if VDPA
bool "VDPA drivers"
default n
if VDPA_MENU
config VDPA_SIM config VDPA_SIM
tristate "vDPA device simulator" tristate "vDPA device simulator"
depends on RUNTIME_TESTING_MENU depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN
select VDPA
select VHOST_RING select VHOST_RING
default n default n
help help
...@@ -24,9 +19,8 @@ config VDPA_SIM ...@@ -24,9 +19,8 @@ config VDPA_SIM
development of vDPA. development of vDPA.
config IFCVF config IFCVF
tristate "Intel IFC VF VDPA driver" tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI depends on PCI_MSI
select VDPA
default n default n
help help
This kernel module can drive Intel IFC VF NIC to offload This kernel module can drive Intel IFC VF NIC to offload
...@@ -34,4 +28,4 @@ config IFCVF ...@@ -34,4 +28,4 @@ config IFCVF
To compile this driver as a module, choose M here: the module will To compile this driver as a module, choose M here: the module will
be called ifcvf. be called ifcvf.
endif # VDPA_MENU endif # VDPA
...@@ -301,12 +301,10 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num) ...@@ -301,12 +301,10 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
static int ifcvf_hw_enable(struct ifcvf_hw *hw) static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{ {
struct ifcvf_lm_cfg __iomem *ifcvf_lm;
struct virtio_pci_common_cfg __iomem *cfg; struct virtio_pci_common_cfg __iomem *cfg;
struct ifcvf_adapter *ifcvf; struct ifcvf_adapter *ifcvf;
u32 i; u32 i;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
ifcvf = vf_to_adapter(hw); ifcvf = vf_to_adapter(hw);
cfg = hw->common_cfg; cfg = hw->common_cfg;
ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config); ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
......
...@@ -31,11 +31,9 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg) ...@@ -31,11 +31,9 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
static int ifcvf_start_datapath(void *private) static int ifcvf_start_datapath(void *private)
{ {
struct ifcvf_hw *vf = ifcvf_private_to_vf(private); struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
struct ifcvf_adapter *ifcvf;
u8 status; u8 status;
int ret; int ret;
ifcvf = vf_to_adapter(vf);
vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2; vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
ret = ifcvf_start_hw(vf); ret = ifcvf_start_hw(vf);
if (ret < 0) { if (ret < 0) {
...@@ -228,7 +226,7 @@ static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev) ...@@ -228,7 +226,7 @@ static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
return IFCVF_SUBSYS_VENDOR_ID; return IFCVF_SUBSYS_VENDOR_ID;
} }
static u16 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{ {
return IFCVF_QUEUE_ALIGNMENT; return IFCVF_QUEUE_ALIGNMENT;
} }
......
...@@ -116,7 +116,7 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device); ...@@ -116,7 +116,7 @@ EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
/** /**
* vdpa_register_device - register a vDPA device * vdpa_register_device - register a vDPA device
* Callers must have a succeed call of vdpa_init_device() before. * Callers must have a succeed call of vdpa_alloc_device() before.
* @vdev: the vdpa device to be registered to vDPA bus * @vdev: the vdpa device to be registered to vDPA bus
* *
* Returns an error when fail to add to vDPA bus * Returns an error when fail to add to vDPA bus
......
...@@ -435,7 +435,7 @@ static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx) ...@@ -435,7 +435,7 @@ static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
return vrh->last_avail_idx; return vrh->last_avail_idx;
} }
static u16 vdpasim_get_vq_align(struct vdpa_device *vdpa) static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{ {
return VDPASIM_QUEUE_ALIGN; return VDPASIM_QUEUE_ALIGN;
} }
...@@ -488,7 +488,7 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa) ...@@ -488,7 +488,7 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa)
status = vdpasim->status; status = vdpasim->status;
spin_unlock(&vdpasim->lock); spin_unlock(&vdpasim->lock);
return vdpasim->status; return status;
} }
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status) static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
......
...@@ -3,6 +3,8 @@ config VHOST_IOTLB ...@@ -3,6 +3,8 @@ config VHOST_IOTLB
tristate tristate
help help
Generic IOTLB implementation for vhost and vringh. Generic IOTLB implementation for vhost and vringh.
This option is selected by any driver which needs to support
an IOMMU in software.
config VHOST_RING config VHOST_RING
tristate tristate
...@@ -11,6 +13,15 @@ config VHOST_RING ...@@ -11,6 +13,15 @@ config VHOST_RING
This option is selected by any driver which needs to access This option is selected by any driver which needs to access
the host side of a virtio ring. the host side of a virtio ring.
config VHOST_DPN
bool
depends on !ARM || AEABI
default y
help
Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN.
This excludes the deprecated ARM ABI since that forces a 4 byte
alignment on all structs - incompatible with virtio spec requirements.
config VHOST config VHOST
tristate tristate
select VHOST_IOTLB select VHOST_IOTLB
...@@ -26,7 +37,7 @@ if VHOST_MENU ...@@ -26,7 +37,7 @@ if VHOST_MENU
config VHOST_NET config VHOST_NET
tristate "Host kernel accelerator for virtio net" tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN
select VHOST select VHOST
---help--- ---help---
This kernel module can be loaded in host kernel to accelerate This kernel module can be loaded in host kernel to accelerate
...@@ -38,7 +49,7 @@ config VHOST_NET ...@@ -38,7 +49,7 @@ config VHOST_NET
config VHOST_SCSI config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver" tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD depends on TARGET_CORE && EVENTFD && VHOST_DPN
select VHOST select VHOST
default n default n
---help--- ---help---
...@@ -47,7 +58,7 @@ config VHOST_SCSI ...@@ -47,7 +58,7 @@ config VHOST_SCSI
config VHOST_VSOCK config VHOST_VSOCK
tristate "vhost virtio-vsock driver" tristate "vhost virtio-vsock driver"
depends on VSOCKETS && EVENTFD depends on VSOCKETS && EVENTFD && VHOST_DPN
select VHOST select VHOST
select VIRTIO_VSOCKETS_COMMON select VIRTIO_VSOCKETS_COMMON
default n default n
...@@ -61,9 +72,9 @@ config VHOST_VSOCK ...@@ -61,9 +72,9 @@ config VHOST_VSOCK
config VHOST_VDPA config VHOST_VDPA
tristate "Vhost driver for vDPA-based backend" tristate "Vhost driver for vDPA-based backend"
depends on EVENTFD depends on EVENTFD && VHOST_DPN
select VHOST select VHOST
select VDPA depends on VDPA
help help
This kernel module can be loaded in host kernel to accelerate This kernel module can be loaded in host kernel to accelerate
guest virtio devices with the vDPA-based backends. guest virtio devices with the vDPA-based backends.
......
...@@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n, ...@@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_net_virtqueue *nvq = struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq); container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs); struct vhost_poll *poll = n->poll + (nvq - n->vqs);
if (!vq->private_data) if (!vhost_vq_get_backend(vq))
return; return;
vhost_poll_stop(poll); vhost_poll_stop(poll);
} }
...@@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n, ...@@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs); struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock; struct socket *sock;
sock = vq->private_data; sock = vhost_vq_get_backend(vq);
if (!sock) if (!sock)
return 0; return 0;
...@@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net, ...@@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
return; return;
vhost_disable_notify(&net->dev, vq); vhost_disable_notify(&net->dev, vq);
sock = rvq->private_data; sock = vhost_vq_get_backend(rvq);
busyloop_timeout = poll_rx ? rvq->busyloop_timeout: busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
tvq->busyloop_timeout; tvq->busyloop_timeout;
...@@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net, ...@@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
if (r == tvq->num && tvq->busyloop_timeout) { if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */ /* Flush batched packets first */
if (!vhost_sock_zcopy(tvq->private_data)) if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
vhost_tx_batch(net, tnvq, tvq->private_data, msghdr); vhost_tx_batch(net, tnvq,
vhost_vq_get_backend(tvq),
msghdr);
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false); vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
...@@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, ...@@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_virtqueue *vq = &nvq->vq; struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net, struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev); dev);
struct socket *sock = vq->private_data; struct socket *sock = vhost_vq_get_backend(vq);
struct page_frag *alloc_frag = &net->page_frag; struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso; struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
...@@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net) ...@@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock; struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
sock = vq->private_data; sock = vhost_vq_get_backend(vq);
if (!sock) if (!sock)
goto out; goto out;
...@@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net) ...@@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net)
int recv_pkts = 0; int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
sock = vq->private_data; sock = vhost_vq_get_backend(vq);
if (!sock) if (!sock)
goto out; goto out;
...@@ -1345,9 +1347,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, ...@@ -1345,9 +1347,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
container_of(vq, struct vhost_net_virtqueue, vq); container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
sock = vq->private_data; sock = vhost_vq_get_backend(vq);
vhost_net_disable_vq(n, vq); vhost_net_disable_vq(n, vq);
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
vhost_net_buf_unproduce(nvq); vhost_net_buf_unproduce(nvq);
nvq->rx_ring = NULL; nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
...@@ -1521,7 +1523,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -1521,7 +1523,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
} }
/* start polling new socket */ /* start polling new socket */
oldsock = vq->private_data; oldsock = vhost_vq_get_backend(vq);
if (sock != oldsock) { if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq, ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock)); sock && vhost_sock_zcopy(sock));
...@@ -1531,7 +1533,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -1531,7 +1533,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
} }
vhost_net_disable_vq(n, vq); vhost_net_disable_vq(n, vq);
vq->private_data = sock; vhost_vq_set_backend(vq, sock);
vhost_net_buf_unproduce(nvq); vhost_net_buf_unproduce(nvq);
r = vhost_vq_init_access(vq); r = vhost_vq_init_access(vq);
if (r) if (r)
...@@ -1568,7 +1570,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) ...@@ -1568,7 +1570,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0; return 0;
err_used: err_used:
vq->private_data = oldsock; vhost_vq_set_backend(vq, oldsock);
vhost_net_enable_vq(n, vq); vhost_net_enable_vq(n, vq);
if (ubufs) if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs); vhost_net_ubuf_put_wait_and_free(ubufs);
......
...@@ -452,7 +452,7 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) ...@@ -452,7 +452,7 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
unsigned out, in; unsigned out, in;
int head, ret; int head, ret;
if (!vq->private_data) { if (!vhost_vq_get_backend(vq)) {
vs->vs_events_missed = true; vs->vs_events_missed = true;
return; return;
} }
...@@ -892,7 +892,7 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, ...@@ -892,7 +892,7 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
} else { } else {
struct vhost_scsi_tpg **vs_tpg, *tpg; struct vhost_scsi_tpg **vs_tpg, *tpg;
vs_tpg = vq->private_data; /* validated at handler entry */ vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
tpg = READ_ONCE(vs_tpg[*vc->target]); tpg = READ_ONCE(vs_tpg[*vc->target]);
if (unlikely(!tpg)) { if (unlikely(!tpg)) {
...@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ...@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is setup by calling the * We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl. * VHOST_SCSI_SET_ENDPOINT ioctl.
*/ */
vs_tpg = vq->private_data; vs_tpg = vhost_vq_get_backend(vq);
if (!vs_tpg) if (!vs_tpg)
goto out; goto out;
...@@ -1184,7 +1184,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) ...@@ -1184,7 +1184,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is setup by calling the * We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl. * VHOST_SCSI_SET_ENDPOINT ioctl.
*/ */
if (!vq->private_data) if (!vhost_vq_get_backend(vq))
goto out; goto out;
memset(&vc, 0, sizeof(vc)); memset(&vc, 0, sizeof(vc));
...@@ -1322,7 +1322,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work) ...@@ -1322,7 +1322,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
if (!vq->private_data) if (!vhost_vq_get_backend(vq))
goto out; goto out;
if (vs->vs_events_missed) if (vs->vs_events_missed)
...@@ -1460,7 +1460,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ...@@ -1460,7 +1460,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq; vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
vq->private_data = vs_tpg; vhost_vq_set_backend(vq, vs_tpg);
vhost_vq_init_access(vq); vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
...@@ -1547,7 +1547,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, ...@@ -1547,7 +1547,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq; vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
} }
......
...@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n) ...@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
void *private; void *private;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
private = vq->private_data; private = vhost_vq_get_backend(vq);
if (!private) { if (!private) {
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
return; return;
...@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f) ...@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV, vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT); VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
f->private_data = n; f->private_data = n;
...@@ -133,8 +133,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n, ...@@ -133,8 +133,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
void *private; void *private;
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
private = vq->private_data; private = vhost_vq_get_backend(vq);
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
return private; return private;
} }
...@@ -198,8 +198,8 @@ static long vhost_test_run(struct vhost_test *n, int test) ...@@ -198,8 +198,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL; priv = test ? n : NULL;
/* start polling new socket */ /* start polling new socket */
oldpriv = vq->private_data; oldpriv = vhost_vq_get_backend(vq);
vq->private_data = priv; vhost_vq_set_backend(vq, priv);
r = vhost_vq_init_access(&n->vqs[index]); r = vhost_vq_init_access(&n->vqs[index]);
...@@ -225,7 +225,7 @@ static long vhost_test_reset_owner(struct vhost_test *n) ...@@ -225,7 +225,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
{ {
void *priv = NULL; void *priv = NULL;
long err; long err;
struct vhost_umem *umem; struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex); mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev); err = vhost_dev_check_owner(&n->dev);
......
...@@ -296,7 +296,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ...@@ -296,7 +296,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
struct vdpa_callback cb; struct vdpa_callback cb;
struct vhost_virtqueue *vq; struct vhost_virtqueue *vq;
struct vhost_vring_state s; struct vhost_vring_state s;
u8 status;
u32 idx; u32 idx;
long r; long r;
...@@ -310,8 +309,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ...@@ -310,8 +309,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
idx = array_index_nospec(idx, v->nvqs); idx = array_index_nospec(idx, v->nvqs);
vq = &v->vqs[idx]; vq = &v->vqs[idx];
status = ops->get_status(vdpa);
if (cmd == VHOST_VDPA_SET_VRING_ENABLE) { if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
if (copy_from_user(&s, argp, sizeof(s))) if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT; return -EFAULT;
...@@ -678,8 +675,6 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep) ...@@ -678,8 +675,6 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
int nvqs, i, r, opened; int nvqs, i, r, opened;
v = container_of(inode->i_cdev, struct vhost_vdpa, cdev); v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
if (!v)
return -ENODEV;
opened = atomic_cmpxchg(&v->opened, 0, 1); opened = atomic_cmpxchg(&v->opened, 0, 1);
if (opened) if (opened)
......
...@@ -231,6 +231,33 @@ enum { ...@@ -231,6 +231,33 @@ enum {
(1ULL << VIRTIO_F_VERSION_1) (1ULL << VIRTIO_F_VERSION_1)
}; };
/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq: Virtqueue.
 * @private_data: The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}
/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq: Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{ {
return vq->acked_features & (1ULL << bit); return vq->acked_features & (1ULL << bit);
......
...@@ -13,9 +13,11 @@ ...@@ -13,9 +13,11 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/export.h> #include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h> #include <linux/bvec.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/vhost_iotlb.h> #include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...) static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
...@@ -1059,6 +1061,8 @@ int vringh_need_notify_kern(struct vringh *vrh) ...@@ -1059,6 +1061,8 @@ int vringh_need_notify_kern(struct vringh *vrh)
} }
EXPORT_SYMBOL(vringh_need_notify_kern); EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
static int iotlb_translate(const struct vringh *vrh, static int iotlb_translate(const struct vringh *vrh,
u64 addr, u64 len, struct bio_vec iov[], u64 addr, u64 len, struct bio_vec iov[],
int iov_size, u32 perm) int iov_size, u32 perm)
...@@ -1416,5 +1420,6 @@ int vringh_need_notify_iotlb(struct vringh *vrh) ...@@ -1416,5 +1420,6 @@ int vringh_need_notify_iotlb(struct vringh *vrh)
} }
EXPORT_SYMBOL(vringh_need_notify_iotlb); EXPORT_SYMBOL(vringh_need_notify_iotlb);
#endif
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock, ...@@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
if (!vq->private_data) if (!vhost_vq_get_backend(vq))
goto out; goto out;
/* Avoid further vmexits, we're already processing the virtqueue */ /* Avoid further vmexits, we're already processing the virtqueue */
...@@ -440,7 +440,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) ...@@ -440,7 +440,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
if (!vq->private_data) if (!vhost_vq_get_backend(vq))
goto out; goto out;
vhost_disable_notify(&vsock->dev, vq); vhost_disable_notify(&vsock->dev, vq);
...@@ -533,8 +533,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) ...@@ -533,8 +533,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err_vq; goto err_vq;
} }
if (!vq->private_data) { if (!vhost_vq_get_backend(vq)) {
vq->private_data = vsock; vhost_vq_set_backend(vq, vsock);
ret = vhost_vq_init_access(vq); ret = vhost_vq_init_access(vq);
if (ret) if (ret)
goto err_vq; goto err_vq;
...@@ -547,14 +547,14 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) ...@@ -547,14 +547,14 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0; return 0;
err_vq: err_vq:
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i]; vq = &vsock->vqs[i];
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
err: err:
...@@ -577,7 +577,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock) ...@@ -577,7 +577,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock)
struct vhost_virtqueue *vq = &vsock->vqs[i]; struct vhost_virtqueue *vq = &vsock->vqs[i];
mutex_lock(&vq->mutex); mutex_lock(&vq->mutex);
vq->private_data = NULL; vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex); mutex_unlock(&vq->mutex);
} }
......
...@@ -45,7 +45,7 @@ config VIRTIO_PCI_LEGACY ...@@ -45,7 +45,7 @@ config VIRTIO_PCI_LEGACY
config VIRTIO_VDPA config VIRTIO_VDPA
tristate "vDPA driver for virtio devices" tristate "vDPA driver for virtio devices"
select VDPA depends on VDPA
select VIRTIO select VIRTIO
help help
This driver provides support for virtio based paravirtual This driver provides support for virtio based paravirtual
......
...@@ -165,7 +165,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) ...@@ -165,7 +165,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
} }
int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info, static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
struct scatterlist *sg, unsigned int nents) struct scatterlist *sg, unsigned int nents)
{ {
struct virtio_balloon *vb = struct virtio_balloon *vb =
...@@ -580,7 +580,7 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) ...@@ -580,7 +580,7 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
&vb->config_read_bitmap)) &vb->config_read_bitmap))
virtio_cread(vb->vdev, struct virtio_balloon_config, virtio_cread(vb->vdev, struct virtio_balloon_config,
free_page_report_cmd_id, free_page_hint_cmd_id,
&vb->cmd_id_received_cache); &vb->cmd_id_received_cache);
return vb->cmd_id_received_cache; return vb->cmd_id_received_cache;
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/virtio.h> #include <linux/virtio.h>
#include <linux/virtio_config.h> #include <linux/virtio_config.h>
#include <linux/input.h> #include <linux/input.h>
#include <linux/slab.h>
#include <uapi/linux/virtio_ids.h> #include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_input.h> #include <uapi/linux/virtio_input.h>
......
...@@ -164,7 +164,7 @@ struct vdpa_config_ops { ...@@ -164,7 +164,7 @@ struct vdpa_config_ops {
u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
/* Device ops */ /* Device ops */
u16 (*get_vq_align)(struct vdpa_device *vdev); u32 (*get_vq_align)(struct vdpa_device *vdev);
u64 (*get_features)(struct vdpa_device *vdev); u64 (*get_features)(struct vdpa_device *vdev);
int (*set_features)(struct vdpa_device *vdev, u64 features); int (*set_features)(struct vdpa_device *vdev, u64 features);
void (*set_config_cb)(struct vdpa_device *vdev, void (*set_config_cb)(struct vdpa_device *vdev,
......
...@@ -9,7 +9,6 @@ ...@@ -9,7 +9,6 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/mod_devicetable.h> #include <linux/mod_devicetable.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/vringh.h>
/** /**
* virtqueue - a queue to register buffers for sending or receiving. * virtqueue - a queue to register buffers for sending or receiving.
......
...@@ -14,8 +14,10 @@ ...@@ -14,8 +14,10 @@
#include <linux/virtio_byteorder.h> #include <linux/virtio_byteorder.h>
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/slab.h> #include <linux/slab.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h> #include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h> #include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h> #include <asm/barrier.h>
/* virtio_ring with information needed for host access. */ /* virtio_ring with information needed for host access. */
...@@ -254,6 +256,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) ...@@ -254,6 +256,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
return __cpu_to_virtio64(vringh_is_little_endian(vrh), val); return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
} }
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb);
int vringh_init_iotlb(struct vringh *vrh, u64 features, int vringh_init_iotlb(struct vringh *vrh, u64 features,
...@@ -284,4 +288,6 @@ void vringh_notify_disable_iotlb(struct vringh *vrh); ...@@ -284,4 +288,6 @@ void vringh_notify_disable_iotlb(struct vringh *vrh);
int vringh_need_notify_iotlb(struct vringh *vrh); int vringh_need_notify_iotlb(struct vringh *vrh);
#endif /* CONFIG_VHOST_IOTLB */
#endif /* _LINUX_VRINGH_H */ #endif /* _LINUX_VRINGH_H */
...@@ -48,8 +48,15 @@ struct virtio_balloon_config { ...@@ -48,8 +48,15 @@ struct virtio_balloon_config {
__u32 num_pages; __u32 num_pages;
/* Number of pages we've actually got in balloon. */ /* Number of pages we've actually got in balloon. */
__u32 actual; __u32 actual;
/* Free page report command id, readonly by guest */ /*
__u32 free_page_report_cmd_id; * Free page hint command id, readonly by guest.
* Was previously named free_page_report_cmd_id so we
* need to carry that name for legacy support.
*/
union {
__u32 free_page_hint_cmd_id;
__u32 free_page_report_cmd_id; /* deprecated */
};
/* Stores PAGE_POISON if page poisoning is in use */ /* Stores PAGE_POISON if page poisoning is in use */
__u32 poison_val; __u32 poison_val;
}; };
......
...@@ -4,7 +4,7 @@ test: virtio_test vringh_test ...@@ -4,7 +4,7 @@ test: virtio_test vringh_test
virtio_test: virtio_ring.o virtio_test.o virtio_test: virtio_ring.o virtio_test.o
vringh_test: vringh_test.o vringh.o virtio_ring.o vringh_test: vringh_test.o vringh.o virtio_ring.o
CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE CFLAGS += -g -O2 -Werror -Wall -I. -I../include/ -I ../../usr/include/ -Wno-pointer-sign -fno-strict-overflow -fno-strict-aliasing -fno-common -MMD -U_FORTIFY_SOURCE -include ../../include/linux/kconfig.h
vpath %.c ../../drivers/virtio ../../drivers/vhost vpath %.c ../../drivers/virtio ../../drivers/vhost
mod: mod:
${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V} ${MAKE} -C `pwd`/../.. M=`pwd`/vhost_test V=${V}
...@@ -22,7 +22,8 @@ OOT_CONFIGS=\ ...@@ -22,7 +22,8 @@ OOT_CONFIGS=\
CONFIG_VHOST=m \ CONFIG_VHOST=m \
CONFIG_VHOST_NET=n \ CONFIG_VHOST_NET=n \
CONFIG_VHOST_SCSI=n \ CONFIG_VHOST_SCSI=n \
CONFIG_VHOST_VSOCK=n CONFIG_VHOST_VSOCK=n \
CONFIG_VHOST_RING=n
OOT_BUILD=KCFLAGS="-I "${OOT_VHOST} ${MAKE} -C ${OOT_KSRC} V=${V} OOT_BUILD=KCFLAGS="-I "${OOT_VHOST} ${MAKE} -C ${OOT_KSRC} V=${V}
oot-build: oot-build:
echo "UNSUPPORTED! Don't use the resulting modules in production!" echo "UNSUPPORTED! Don't use the resulting modules in production!"
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
#include <stdlib.h>
#if defined(__i386__) || defined(__x86_64__) #if defined(__i386__) || defined(__x86_64__)
#define barrier() asm volatile("" ::: "memory") #define barrier() asm volatile("" ::: "memory")
#define virt_mb() __sync_synchronize() #define virt_mb() __sync_synchronize()
......
...@@ -7,4 +7,5 @@ ...@@ -7,4 +7,5 @@
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var)))) #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
#define __aligned(x) __attribute((__aligned__(x)))
#endif #endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册