Commit 5c34d002, authored by Christoph Hellwig, committed by Michael S. Tsirkin

virtio_pci: remove struct virtio_pci_vq_info

We don't really need struct virtio_pci_vq_info, as most fields in there
are redundant:

 - the vq backpointer is not strictly needed to start with
 - the entry in the vqs list is not needed - the generic virtqueue already
   has a list; we only need to check if it has a callback to get the same
   semantics
 - we can use a simple array to look up the MSI-X vector if needed
 - that simple array now also doubles as a replacement for the
   per_vq_vectors flag

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Parent: e3b56cdd
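In short: the per-vring interrupt handler now walks the generic vdev.vqs list (skipping queues registered without a callback), and the MSI-X vector for a queue is looked up in a flat msix_vector_map array indexed by vq->index. A condensed view of the resulting code, lifted from the hunks below (not a standalone snippet; kernel context omitted):

static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	irqreturn_t ret = IRQ_NONE;
	struct virtqueue *vq;

	/* every virtqueue is already linked on the generic vdev.vqs list;
	 * queues without a callback never need interrupt dispatch */
	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	return ret;
}

/* per-VQ vector lookup, replacing the old info->msix_vector field;
 * msix_vector_map is only allocated when per-VQ vectors are in use */
int vec = vp_dev->msix_vector_map[vq->index];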
@@ -62,16 +62,13 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
-	struct virtio_pci_vq_info *info;
 	irqreturn_t ret = IRQ_NONE;
-	unsigned long flags;
+	struct virtqueue *vq;
 
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_for_each_entry(info, &vp_dev->virtqueues, node) {
-		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+	list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
+		if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return ret;
 }
@@ -167,55 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	return err;
 }
 
-static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
-				     void (*callback)(struct virtqueue *vq),
-				     const char *name,
-				     u16 msix_vec)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
-	struct virtqueue *vq;
-	unsigned long flags;
-
-	/* fill out our structure that represents an active queue */
-	if (!info)
-		return ERR_PTR(-ENOMEM);
-
-	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
-	if (IS_ERR(vq))
-		goto out_info;
-
-	info->vq = vq;
-	if (callback) {
-		spin_lock_irqsave(&vp_dev->lock, flags);
-		list_add(&info->node, &vp_dev->virtqueues);
-		spin_unlock_irqrestore(&vp_dev->lock, flags);
-	} else {
-		INIT_LIST_HEAD(&info->node);
-	}
-
-	vp_dev->vqs[index] = info;
-	return vq;
-
-out_info:
-	kfree(info);
-	return vq;
-}
-
-static void vp_del_vq(struct virtqueue *vq)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
-	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
-	unsigned long flags;
-
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_del(&info->node);
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
-
-	vp_dev->del_vq(info);
-	kfree(info);
-}
-
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
@@ -224,16 +172,15 @@ void vp_del_vqs(struct virtio_device *vdev)
 	int i;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		if (vp_dev->per_vq_vectors) {
-			int v = vp_dev->vqs[vq->index]->msix_vector;
+		if (vp_dev->msix_vector_map) {
+			int v = vp_dev->msix_vector_map[vq->index];
 
 			if (v != VIRTIO_MSI_NO_VECTOR)
 				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
 					vq);
 		}
-		vp_del_vq(vq);
+		vp_dev->del_vq(vq);
 	}
-	vp_dev->per_vq_vectors = false;
 
 	if (vp_dev->intx_enabled) {
 		free_irq(vp_dev->pci_dev->irq, vp_dev);
@@ -261,8 +208,8 @@ void vp_del_vqs(struct virtio_device *vdev)
 	vp_dev->msix_names = NULL;
 	kfree(vp_dev->msix_affinity_masks);
 	vp_dev->msix_affinity_masks = NULL;
-	kfree(vp_dev->vqs);
-	vp_dev->vqs = NULL;
+	kfree(vp_dev->msix_vector_map);
+	vp_dev->msix_vector_map = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
@@ -275,10 +222,6 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 	u16 msix_vec;
 	int i, err, nvectors, allocated_vectors;
 
-	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
-	if (!vp_dev->vqs)
-		return -ENOMEM;
-
 	if (per_vq_vectors) {
 		/* Best option: one for change interrupt, one per vq. */
 		nvectors = 1;
@@ -294,7 +237,13 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 	if (err)
 		goto error_find;
 
-	vp_dev->per_vq_vectors = per_vq_vectors;
+	if (per_vq_vectors) {
+		vp_dev->msix_vector_map = kmalloc_array(nvqs,
+				sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
+		if (!vp_dev->msix_vector_map)
+			goto error_find;
+	}
+
 	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
@@ -304,19 +253,25 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 
 		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
-		else if (vp_dev->per_vq_vectors)
+		else if (per_vq_vectors)
 			msix_vec = allocated_vectors++;
 		else
 			msix_vec = VP_MSIX_VQ_VECTOR;
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
+		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
+				msix_vec);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
 			goto error_find;
 		}
 
-		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
+		if (!per_vq_vectors)
 			continue;
 
+		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
+			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+			continue;
+		}
+
 		/* allocate per-vq irq if available and necessary */
 		snprintf(vp_dev->msix_names[msix_vec],
 			 sizeof *vp_dev->msix_names,
@@ -326,9 +281,13 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 				  vring_interrupt, 0,
 				  vp_dev->msix_names[msix_vec],
 				  vqs[i]);
-		if (err)
+		if (err) {
+			/* don't free this irq on error */
+			vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
 			goto error_find;
+		}
+		vp_dev->msix_vector_map[i] = msix_vec;
 	}
 	return 0;
 
 error_find:
@@ -343,23 +302,18 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	int i, err;
 
-	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
-	if (!vp_dev->vqs)
-		return -ENOMEM;
-
 	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
 			dev_name(&vdev->dev), vp_dev);
 	if (err)
 		goto out_del_vqs;
 
 	vp_dev->intx_enabled = 1;
-	vp_dev->per_vq_vectors = false;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
 		}
-		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+		vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
 				VIRTIO_MSI_NO_VECTOR);
 		if (IS_ERR(vqs[i])) {
 			err = PTR_ERR(vqs[i]);
@@ -409,16 +363,15 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
-	struct cpumask *mask;
-	unsigned int irq;
 
 	if (!vq->callback)
 		return -EINVAL;
 
 	if (vp_dev->msix_enabled) {
-		mask = vp_dev->msix_affinity_masks[info->msix_vector];
-		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
+		int vec = vp_dev->msix_vector_map[vq->index];
+		struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
+		unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
+
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
@@ -498,8 +451,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
 	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->pci_dev = pci_dev;
-	INIT_LIST_HEAD(&vp_dev->virtqueues);
-	spin_lock_init(&vp_dev->lock);
 
 	/* enable the device */
 	rc = pci_enable_device(pci_dev);
......
@@ -31,17 +31,6 @@
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
-struct virtio_pci_vq_info {
-	/* the actual virtqueue */
-	struct virtqueue *vq;
-
-	/* the list node for the virtqueues list */
-	struct list_head node;
-
-	/* MSI-X vector (or none) */
-	unsigned msix_vector;
-};
-
 /* Our device structure */
 struct virtio_pci_device {
 	struct virtio_device vdev;
@@ -75,13 +64,6 @@ struct virtio_pci_device {
 	/* the IO mapping for the PCI config space */
 	void __iomem *ioaddr;
 
-	/* a list of queues so we can dispatch IRQs */
-	spinlock_t lock;
-	struct list_head virtqueues;
-
-	/* array of all queues for house-keeping */
-	struct virtio_pci_vq_info **vqs;
-
 	/* MSI-X support */
 	int msix_enabled;
 	int intx_enabled;
@@ -94,16 +76,15 @@ struct virtio_pci_device {
 	/* Vectors allocated, excluding per-vq vectors if any */
 	unsigned msix_used_vectors;
 
-	/* Whether we have vector per vq */
-	bool per_vq_vectors;
+	/* Map of per-VQ MSI-X vectors, may be NULL */
+	unsigned *msix_vector_map;
 
 	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
-				      struct virtio_pci_vq_info *info,
 				      unsigned idx,
 				      void (*callback)(struct virtqueue *vq),
 				      const char *name,
 				      u16 msix_vec);
-	void (*del_vq)(struct virtio_pci_vq_info *info);
+	void (*del_vq)(struct virtqueue *vq);
 
 	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
......
@@ -112,7 +112,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
-				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -130,8 +129,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
 		return ERR_PTR(-ENOENT);
 
-	info->msix_vector = msix_vec;
-
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -162,9 +159,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	return ERR_PTR(err);
 }
 
-static void del_vq(struct virtio_pci_vq_info *info)
+static void del_vq(struct virtqueue *vq)
 {
-	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
......
@@ -293,7 +293,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
-				  struct virtio_pci_vq_info *info,
 				  unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name,
@@ -323,8 +322,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
 	/* get offset of notification word for this vq */
 	off = vp_ioread16(&cfg->queue_notify_off);
 
-	info->msix_vector = msix_vec;
-
 	/* create the vring */
 	vq = vring_create_virtqueue(index, num,
 				    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -409,9 +406,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return 0;
 }
 
-static void del_vq(struct virtio_pci_vq_info *info)
+static void del_vq(struct virtqueue *vq)
 {
-	struct virtqueue *vq = info->vq;
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
 	vp_iowrite16(vq->index, &vp_dev->common->queue_select);
......