/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

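/*
 * Note: vq->priv is set up by the version-specific setup_vq() to point at
 * the (ioremapped) notify register for this queue, so a single 16-bit
 * write of the queue index is all a "kick" takes.
 */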
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change?  Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

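/*
 * Allocate and wire up the MSI-X vectors.  Vector 0 always carries
 * configuration-change interrupts; with !per_vq_vectors one extra vector
 * is shared by all virtqueues, otherwise each VQ with a callback gets its
 * own vector (those per-VQ irqs are requested later, in vp_find_vqs_msix()).
 */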
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
				     GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
					GFP_KERNEL))
			goto error;

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, PCI_IRQ_MSIX |
					     (desc ? PCI_IRQ_AFFINITY : 0),
					     desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

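/*
 * Create one virtqueue through the version-specific setup_vq() hook and
 * track it.  Only VQs with a callback go on vp_dev->virtqueues, since
 * vp_vring_interrupt() has no work to do for callback-less queues.
 */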
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

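/*
 * Tear-down mirrors vp_find_vqs(): per-VQ irqs were requested with the
 * virtqueue as the cookie and are freed against vq, while the config and
 * shared-vq vectors were requested against vp_dev itself.
 */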
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	for (i = 0; i < vp_dev->msix_vectors; i++)
		if (vp_dev->msix_affinity_masks[i])
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

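/*
 * MSI-X flavour of find_vqs.  The irq_affinity descriptor is only passed
 * through when per-VQ vectors are requested: spreading queue interrupts
 * across CPUs makes no sense for a single shared vector.
 */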
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}
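
/*
 * For illustration only: drivers normally reach the implementation above
 * through the virtio core rather than calling the transport directly.
 * A hypothetical two-queue device might do something like
 *
 *	vq_callback_t *cbs[] = { rx_done, tx_done };	(made-up callbacks)
 *	const char * const names[] = { "rx", "tx" };
 *	struct virtqueue *vqs[2];
 *	int err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 *
 * which ends up here via vdev->config->find_vqs.
 */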

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (cpu == -1)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

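/*
 * Report the irq affinity for a queue's vector.  Only meaningful when
 * each VQ owns its own vector; with a shared vector or INTx there is no
 * per-queue affinity to report, hence NULL.
 */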
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

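/*
 * System sleep support: freeze() quiesces the virtio device before the
 * PCI function is disabled; restore() re-enables the function, restores
 * bus mastering and then revives the virtio device.
 */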
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback has run. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	if (rc)
		goto err_register;

	return 0;

err_register:
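	/*
	 * vp_dev->ioaddr is only set by the legacy probe, so use it to
	 * tell which backend actually bound and which remove hook to call.
	 */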
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");