/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

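/* NAPI weight: how many packets one poll may process on a receive queue. */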
static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2
#define VIRTNET_DRIVER_VERSION "1.0.0"

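/* Per-cpu tx/rx counters; the u64_stats_sync seqcounts let readers fetch
 * consistent 64-bit values even on 32-bit machines. */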
struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* Per-cpu variable to show the mapping from CPU to virtqueue */
	int __percpu *vq_index;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg. This padding makes the next sg 16-byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * page->private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	*len -= size;
}

/* Called from bottom half context */
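/* Builds an skb from a received page chain: the virtio header and up to
 * GOOD_COPY_LEN bytes of data are copied into the linear area, and any
 * remaining data is attached as page fragments. */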
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

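/* For mergeable rx buffers, pull the remaining num_buffers - 1 buffers off
 * the virtqueue and attach each page to the skb as a fragment. */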
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(rq->vq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--rq->num;
	}
	return 0;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(rq, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(rq, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(rq, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(rq, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(rq->sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(rq, page);

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now.  virtnet_poll wants to re-enable the queue, so we
	 * disable it here.  We synchronize against interrupts via
	 * NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

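/* Process-context refill, scheduled when the receive path ran out of
 * memory; napi is disabled around the refill of each queue. */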
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in,
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		/* Make sure we have some buffers: if oom use wq. */
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

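/* Reclaim skbs the device has finished transmitting and credit their
 * byte/packet counts to the per-cpu tx stats. */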
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

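/* Translate skb checksum/GSO state into a virtio_net_hdr, then post the
 * header and skb data as one scatterlist on the send virtqueue. */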
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);

	num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(sq->vq, sq->sg, num_sg,
				 0, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);

	virtqueue_kick(vi->cvq);

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, 1, 0)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  addr->sa_data, dev->addr_len);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
				  0, 0))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

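/* Ask the device to use @queue_pairs tx/rx queue pairs via the control
 * virtqueue; a no-op when the device lacks VIRTIO_NET_F_MQ. */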
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else
		vi->curr_queue_pairs = queue_pairs;

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC fitler table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

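/* Drop any virtqueue affinity hints and rebuild the per-cpu txq map,
 * treating @hcpu (a CPU going offline, or -1 for none) as unavailable. */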
static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;
	int cpu;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		if (cpu == hcpu) {
			*per_cpu_ptr(vi->vq_index, cpu) = -1;
		} else {
			*per_cpu_ptr(vi->vq_index, cpu) =
				++i % vi->curr_queue_pairs;
		}
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of CPUs is equal to the number
	 * of queue pairs, we let each queue pair be private to one CPU by
	 * setting the affinity hint to eliminate the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		*per_cpu_ptr(vi->vq_index, cpu) = i;
		i++;
	}

	vi->affinity_hint_set = true;
}

static int virtnet_cpu_callback(struct notifier_block *nfb,
			        unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch(action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* To avoid contending a lock held by a vcpu that would exit to the host,
 * select the txq based on the processor id.
 */
static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	int txq;
	struct virtnet_info *vi = netdev_priv(dev);

	if (skb_rx_queue_recorded(skb)) {
		txq = skb_get_rx_queue(skb);
	} else {
		txq = *__this_cpu_ptr(vi->vq_index);
		if (txq == -1)
			txq = 0;
	}

	while (unlikely(txq >= dev->real_num_tx_queues))
		txq -= dev->real_num_tx_queues;

	return txq;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
	.ndo_select_queue     = virtnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

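/* Handle a config-space change interrupt: ack link announcements and
 * propagate link up/down state to the network core. */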
static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

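/* Give back buffers that were posted to the virtqueues but never
 * consumed by the device. */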
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->mergeable_rx_bufs || vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
				offsetof(struct virtio_net_config,
				max_virtqueue_pairs), &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	vi->vq_index = alloc_percpu(int);
	if (vi->vq_index == NULL)
		goto free_stats;

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_index;

	netif_set_real_num_tx_queues(dev, 1);
	netif_set_real_num_rx_queues(dev, 1);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
free_index:
	free_percpu(vi->vq_index);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

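/* Teardown shared by remove and freeze: reset the device so it stops
 * touching buffers, then reclaim them and delete the virtqueues. */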
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	free_percpu(vi->vq_index);
	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->max_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");