Commit 8d10aae2 authored by Linus Torvalds

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull vhost fixes and cleanups from Michael S. Tsirkin:
 "This includes some fixes and cleanups for vhost net and scsi drivers"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vhost/test: update test after vhost cleanups
  vhost: Make local function static
  vhost: Make vhost a separate module
  vhost-scsi: Rename struct tcm_vhost_cmd *tv_cmd to *cmd
  vhost-scsi: Rename struct tcm_vhost_tpg *tv_tpg to *tpg
  vhost-scsi: Make func indention more consistent
  vhost-scsi: Rename struct vhost_scsi *s to *vs
  vhost-scsi: Remove unnecessary forward struct vhost_scsi declaration
  vhost: Simplify dev->vqs[i] access
  vhost-net: fix use-after-free in vhost_net_flush
drivers/vhost/Kconfig

 config VHOST_NET
 	tristate "Host kernel accelerator for virtio net"
 	depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+	select VHOST
 	select VHOST_RING
 	---help---
 	  This kernel module can be loaded in host kernel to accelerate
@@ -13,6 +14,7 @@ config VHOST_NET
 config VHOST_SCSI
 	tristate "VHOST_SCSI TCM fabric driver"
 	depends on TARGET_CORE && EVENTFD && m
+	select VHOST
 	select VHOST_RING
 	default n
 	---help---
@@ -24,3 +26,9 @@ config VHOST_RING
 	---help---
 	  This option is selected by any driver which needs to access
 	  the host side of a virtio ring.
+
+config VHOST
+	tristate
+	---help---
+	  This option is selected by any driver which needs to access
+	  the core of vhost.
drivers/vhost/Makefile

 obj-$(CONFIG_VHOST_NET) += vhost_net.o
-vhost_net-y := vhost.o net.o
+vhost_net-y := net.o

 obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
 vhost_scsi-y := scsi.o

 obj-$(CONFIG_VHOST_RING) += vringh.o
+
+obj-$(CONFIG_VHOST) += vhost.o
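
Note: with vhost.o no longer linked into vhost_net-y, net.o and scsi.o now call the vhost core across a module boundary, which is why the vhost.c hunks below add EXPORT_SYMBOL_GPL() markers throughout. A minimal sketch of the pattern (vhost_example_op is an invented name, not part of this commit):

/* Core side, built into vhost.ko: make an entry point visible to
 * other GPL-compatible modules. */
#include <linux/module.h>

int vhost_example_op(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_example_op);

A consumer such as vhost_net.ko simply calls the exported function; depmod records the symbol dependency, so loading the consumer pulls in vhost.ko first.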
drivers/vhost/net.c

@@ -168,7 +168,7 @@ static void vhost_net_clear_ubuf_info(struct vhost_net *n)
 	}
 }

-int vhost_net_set_ubuf_info(struct vhost_net *n)
+static int vhost_net_set_ubuf_info(struct vhost_net *n)
 {
 	bool zcopy;
 	int i;
@@ -189,7 +189,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
 	return -ENOMEM;
 }

-void vhost_net_vq_reset(struct vhost_net *n)
+static void vhost_net_vq_reset(struct vhost_net *n)
 {
 	int i;
......
drivers/vhost/scsi.c (this diff is collapsed)
drivers/vhost/test.c

@@ -18,7 +18,7 @@
 #include <linux/slab.h>

 #include "test.h"
-#include "vhost.c"
+#include "vhost.h"

 /* Max number of bytes transferred before requeueing the job.
  * Using this limit prevents one virtqueue from starving others. */
@@ -38,17 +38,19 @@ struct vhost_test {
  * read-size critical section for our kind of RCU. */
 static void handle_vq(struct vhost_test *n)
 {
-	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
+	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
 	unsigned out, in;
 	int head;
 	size_t len, total_len = 0;
 	void *private;

-	private = rcu_dereference_check(vq->private_data, 1);
-	if (!private)
+	mutex_lock(&vq->mutex);
+	private = vq->private_data;
+	if (!private) {
+		mutex_unlock(&vq->mutex);
 		return;
+	}

-	mutex_lock(&vq->mutex);
 	vhost_disable_notify(&n->dev, vq);

 	for (;;) {
@@ -102,15 +104,23 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 {
 	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
 	struct vhost_dev *dev;
+	struct vhost_virtqueue **vqs;
 	int r;

 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}

 	dev = &n->dev;
+	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
 	if (r < 0) {
+		kfree(vqs);
 		kfree(n);
 		return r;
 	}
@@ -126,9 +136,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
 	void *private;

 	mutex_lock(&vq->mutex);
-	private = rcu_dereference_protected(vq->private_data,
-					    lockdep_is_held(&vq->mutex));
-	rcu_assign_pointer(vq->private_data, NULL);
+	private = vq->private_data;
+	vq->private_data = NULL;
 	mutex_unlock(&vq->mutex);
 	return private;
 }
@@ -140,7 +149,7 @@ static void vhost_test_stop(struct vhost_test *n, void **privatep)

 static void vhost_test_flush_vq(struct vhost_test *n, int index)
 {
-	vhost_poll_flush(&n->dev.vqs[index].poll);
+	vhost_poll_flush(&n->vqs[index].poll);
 }

 static void vhost_test_flush(struct vhost_test *n)
@@ -268,14 +277,14 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
 			return -EFAULT;
 		return vhost_test_run(n, test);
 	case VHOST_GET_FEATURES:
-		features = VHOST_NET_FEATURES;
+		features = VHOST_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
 			return -EFAULT;
 		return 0;
 	case VHOST_SET_FEATURES:
 		if (copy_from_user(&features, featurep, sizeof features))
 			return -EFAULT;
-		if (features & ~VHOST_NET_FEATURES)
+		if (features & ~VHOST_FEATURES)
 			return -EOPNOTSUPP;
 		return vhost_test_set_features(n, features);
 	case VHOST_RESET_OWNER:
......
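The test.c changes above track two vhost API cleanups from this series: vq->private_data is now plain data protected by vq->mutex rather than RCU, and vhost_dev_init() takes an array of vhost_virtqueue pointers instead of an embedded array. A sketch of the new open path for a hypothetical driver (my_vhost, my_open_sketch and MY_VQ_MAX are invented names; the shape mirrors vhost_test_open above):

#include <linux/slab.h>
#include "vhost.h"

#define MY_VQ_MAX 1

struct my_vhost {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_VQ_MAX];
};

static long my_open_sketch(struct my_vhost *n)
{
	/* Separate pointer array: the core keeps it as dev->vqs. */
	struct vhost_virtqueue **vqs;
	long r;
	int i;

	vqs = kmalloc(MY_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;
	for (i = 0; i < MY_VQ_MAX; i++)
		vqs[i] = &n->vqs[i];	/* point at the embedded vqs */

	r = vhost_dev_init(&n->dev, vqs, MY_VQ_MAX);
	if (r < 0)
		kfree(vqs);	/* the core holds no reference on failure */
	return r;
}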
drivers/vhost/vhost.c

@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
+#include <linux/module.h>

 #include "vhost.h"
@@ -66,6 +67,7 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 	work->flushing = 0;
 	work->queue_seq = work->done_seq = 0;
 }
+EXPORT_SYMBOL_GPL(vhost_work_init);

 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
@@ -79,6 +81,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,

 	vhost_work_init(&poll->work, fn);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_init);

 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
@@ -101,6 +104,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)

 	return ret;
 }
+EXPORT_SYMBOL_GPL(vhost_poll_start);

 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
@@ -111,6 +115,7 @@ void vhost_poll_stop(struct vhost_poll *poll)
 		poll->wqh = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_poll_stop);

 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 				unsigned seq)
@@ -123,7 +128,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 	return left <= 0;
 }

-static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
 	int flushing;
@@ -138,6 +143,7 @@ static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
+EXPORT_SYMBOL_GPL(vhost_work_flush);

 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
@@ -145,6 +151,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
 {
 	vhost_work_flush(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_flush);

 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
@@ -158,11 +165,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 	}
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
+EXPORT_SYMBOL_GPL(vhost_work_queue);

 void vhost_poll_queue(struct vhost_poll *poll)
 {
 	vhost_work_queue(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_queue);

 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
@@ -251,17 +260,16 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 /* Helper to allocate iovec buffers for all vqs. */
 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 {
+	struct vhost_virtqueue *vq;
 	int i;

 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
-					       UIO_MAXIOV, GFP_KERNEL);
-		dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
-					  GFP_KERNEL);
-		dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
-					    UIO_MAXIOV, GFP_KERNEL);
-		if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
-			!dev->vqs[i]->heads)
+		vq = dev->vqs[i];
+		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
+				       GFP_KERNEL);
+		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
+		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
+		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
 	}
 	return 0;
@@ -283,6 +291,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 long vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue **vqs, int nvqs)
 {
+	struct vhost_virtqueue *vq;
 	int i;

 	dev->vqs = vqs;
@@ -297,19 +306,21 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;

 	for (i = 0; i < dev->nvqs; ++i) {
-		dev->vqs[i]->log = NULL;
-		dev->vqs[i]->indirect = NULL;
-		dev->vqs[i]->heads = NULL;
-		dev->vqs[i]->dev = dev;
-		mutex_init(&dev->vqs[i]->mutex);
-		vhost_vq_reset(dev, dev->vqs[i]);
-		if (dev->vqs[i]->handle_kick)
-			vhost_poll_init(&dev->vqs[i]->poll,
-					dev->vqs[i]->handle_kick, POLLIN, dev);
+		vq = dev->vqs[i];
+		vq->log = NULL;
+		vq->indirect = NULL;
+		vq->heads = NULL;
+		vq->dev = dev;
+		mutex_init(&vq->mutex);
+		vhost_vq_reset(dev, vq);
+		if (vq->handle_kick)
+			vhost_poll_init(&vq->poll, vq->handle_kick,
+					POLLIN, dev);
 	}

 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_init);

 /* Caller should have device mutex */
 long vhost_dev_check_owner(struct vhost_dev *dev)
@@ -317,6 +328,7 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	/* Are you the owner? If not, I don't think you mean to do that */
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

 struct vhost_attach_cgroups_struct {
 	struct vhost_work work;
@@ -348,6 +360,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
 	return dev->mm;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
@@ -391,11 +404,13 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
 err_mm:
 	return err;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

 struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 {
 	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
@@ -406,6 +421,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 	memory->nregions = 0;
 	RCU_INIT_POINTER(dev->memory, memory);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

 void vhost_dev_stop(struct vhost_dev *dev)
 {
@@ -418,6 +434,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_dev_stop);

 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
@@ -458,6 +475,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 		mmput(dev->mm);
 	dev->mm = NULL;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
@@ -543,6 +561,7 @@ int vhost_log_access_ok(struct vhost_dev *dev)
 				       lockdep_is_held(&dev->mutex));
 	return memory_access_ok(dev, mp, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);

 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
@@ -568,6 +587,7 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 		vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
@@ -797,6 +817,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		vhost_poll_flush(&vq->poll);
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
@@ -877,6 +898,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 done:
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 						     __u64 addr, __u32 len)
@@ -968,6 +990,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 	BUG();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_log_write);

 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
@@ -1019,6 +1042,7 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 	vq->signalled_used_valid = false;
 	return get_user(vq->last_used_idx, &vq->used->idx);
 }
+EXPORT_SYMBOL_GPL(vhost_init_used);

 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size)
@@ -1295,12 +1319,14 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
 	return head;
 }
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
 	vq->last_avail_idx -= n;
 }
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

 /* After we've used one of their buffers, we tell them about it. We'll then
  * want to notify the guest, using eventfd. */
@@ -1349,6 +1375,7 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 		vq->signalled_used_valid = false;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used);

 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			      struct vring_used_elem *heads,
@@ -1418,6 +1445,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 	}
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_n);

 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
@@ -1462,6 +1490,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (vq->call_ctx && vhost_notify(dev, vq))
 		eventfd_signal(vq->call_ctx, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_signal);

 /* And here's the combo meal deal. Supersize me! */
 void vhost_add_used_and_signal(struct vhost_dev *dev,
@@ -1471,6 +1500,7 @@ void vhost_add_used_and_signal(struct vhost_dev *dev,
 	vhost_add_used(vq, head, len);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
@@ -1480,6 +1510,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
 	vhost_add_used_n(vq, heads, count);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1517,6 +1548,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)

 	return avail_idx != vq->avail_idx;
 }
+EXPORT_SYMBOL_GPL(vhost_enable_notify);

 /* We don't need to be notified again. */
 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
@@ -1533,3 +1565,21 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 			       &vq->used->flags, r);
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
+
+static int __init vhost_init(void)
+{
+	return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");
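
Taken together, the exports above cover the whole descriptor life cycle, so a client module can run the usual virtqueue service loop without reaching into vhost internals. A sketch modeled on handle_vq() in drivers/vhost/test.c from this same tree (my_handle_vq is an invented name; payload handling is elided):

static void my_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head;

	mutex_lock(&vq->mutex);
	if (!vq->private_data) {	/* no backend attached */
		mutex_unlock(&vq->mutex);
		return;
	}
	vhost_disable_notify(dev, vq);	/* suppress guest kicks while polling */

	for (;;) {
		head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)		/* malformed descriptor chain */
			break;
		if (head == vq->num) {	/* ring empty: re-arm notifications */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;	/* raced with a new buffer */
			}
			break;
		}
		/* ... consume vq->iov[0 .. out+in) here ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}

	mutex_unlock(&vq->mutex);
}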
drivers/vhost/vhost.h

@@ -46,6 +46,8 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
+long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);

 struct vhost_log {
 	u64 addr;
......
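The two declarations added to vhost.h expose work flushing and the vring ioctl dispatcher to client modules; the vhost-scsi flush path, for instance, waits on its completion work this way. A short usage sketch (my_work_fn and my_drain are invented names):

static void my_work_fn(struct vhost_work *work)
{
	/* runs on the device's vhost worker thread */
}

static void my_drain(struct vhost_dev *dev)
{
	struct vhost_work work;

	vhost_work_init(&work, my_work_fn);
	vhost_work_queue(dev, &work);
	/* returns once my_work_fn has executed on the worker */
	vhost_work_flush(dev, &work);
}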