/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of this receive queue: input.$index */
	char name[40];
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* enable config space updates */
	bool config_enable;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* Page_frag for GFP_KERNEL packet buffer allocation when we run
	 * low on memory.
	 */
	struct page_frag alloc_frag;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hot plug notifier */
	struct notifier_block nb;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16 byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

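/* TX completion interrupt: the host has used some output buffers,
 * so the matching tx queue can be woken.
 */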
static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	/* We were probably waiting for more output buffers. */
	netif_wake_subqueue(vi->dev, vq2txq(vq));
}

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		hdr_padded_len = sizeof hdr->mhdr;
	} else {
		hdr_len = sizeof hdr->hdr;
		hdr_padded_len = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

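/* Gather the buffers the host merged into this packet beyond the first,
 * appending them to head_skb as page fragments (chaining extra skbs on
 * the frag list when the fragment slots run out).
 */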
static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb);
	struct sk_buff *curr_skb = head_skb;
	char *buf;
	struct page *page;
	int num_buf, len, offset;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		int num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 head_skb->dev->name, hdr->mhdr.num_buffers);
			head_skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		if (unlikely(len > MAX_PACKET_LEN)) {
			pr_debug("%s: rx error: merge buffer too long\n",
				 head_skb->dev->name);
			len = MAX_PACKET_LEN;
		}
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
			if (unlikely(!nskb)) {
				head_skb->dev->stats.rx_dropped++;
				return -ENOMEM;
			}
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += MAX_PACKET_LEN;
		}
		page = virt_to_head_page(buf);
		offset = buf - (char *)page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, MAX_PACKET_LEN);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len,
					MAX_PACKET_LEN);
		}
		--rq->num;
	}
	return 0;
}

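/* Handle one incoming buffer: build the skb, record stats, apply the
 * checksum/GSO metadata carried in the virtio_net header, then pass the
 * packet up the stack.
 */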
static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct net_device *dev = vi->dev;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->big_packets)
			give_pages(rq, buf);
		else if (vi->mergeable_rx_bufs)
			put_page(virt_to_head_page(buf));
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else if (vi->mergeable_rx_bufs) {
		struct page *page = virt_to_head_page(buf);
		skb = page_to_skb(rq, page,
				  (char *)buf - (char *)page_address(page),
				  len, MAX_PACKET_LEN);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			put_page(page);
			return;
		}
		if (receive_mergeable(rq, skb)) {
			dev_kfree_skb(skb);
			return;
		}
	} else {
		page = buf;
		skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(rq, page);
			return;
		}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->rx_syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->rx_syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			net_warn_ratelimited("%s: bad gso type %u.\n",
					     dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			net_warn_ratelimited("%s: zero gso size.\n", dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

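/* Post a single skb-based receive buffer (neither big nor mergeable mode). */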
static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

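/* Post a chain of pages large enough to hold a maximum-sized GSO packet. */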
static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separate rq->sg[0] for virtio_net_hdr only due to a QEMU bug */
	sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

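/* Post one page-fragment buffer; the host may merge several per packet. */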
static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	char *buf = NULL;
	int err;

	if (gfp & __GFP_WAIT) {
		if (skb_page_frag_refill(MAX_PACKET_LEN, &vi->alloc_frag,
					 gfp)) {
			buf = (char *)page_address(vi->alloc_frag.page) +
			      vi->alloc_frag.offset;
			get_page(vi->alloc_frag.page);
			vi->alloc_frag.offset += MAX_PACKET_LEN;
		}
	} else {
		buf = netdev_alloc_frag(MAX_PACKET_LEN);
	}
	if (!buf)
		return -ENOMEM;

	sg_init_one(rq->sg, buf, MAX_PACKET_LEN);
	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(rq, gfp);
		else
			err = add_recvbuf_small(rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
		++rq->num;
	} while (rq->vq->num_free);
	if (unlikely(rq->num > rq->max))
		rq->max = rq->num;
	virtqueue_kick(rq->vq);
	return !oom;
}

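/* RX interrupt callback: hand further processing of this queue to NAPI. */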
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&rq->napi);
	}
}

static void virtnet_napi_enable(struct receive_queue *rq)
{
	napi_enable(&rq->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&rq->napi)) {
		virtqueue_disable_cb(rq->vq);
		local_bh_disable();
		__napi_schedule(&rq->napi);
		local_bh_enable();
	}
}

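/* Retry filling the receive queues with GFP_KERNEL after an earlier OOM. */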
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(rq, GFP_KERNEL);
		virtnet_napi_enable(rq);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

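/* NAPI poll handler: deliver received packets, top up the receive buffers,
 * and re-enable interrupts once the budget is no longer exhausted.
 */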
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	void *buf;
	unsigned int r, len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
		receive_buf(rq, buf, len);
		--rq->num;
		received++;
	}

	if (rq->num < rq->max / 2) {
		if (!try_fill_recv(rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		r = virtqueue_enable_cb_prepare(rq->vq);
		napi_complete(napi);
		if (unlikely(virtqueue_poll(rq->vq, r)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(rq->vq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);
		virtnet_napi_enable(&vi->rq[i]);
	}

	return 0;
}

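/* Reclaim skbs the host has finished sending and credit the tx statistics. */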
static void free_old_xmit_skbs(struct send_queue *sq)
{
	struct sk_buff *skb;
	unsigned int len;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->tx_syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->tx_syncp);

		dev_kfree_skb_any(skb);
	}
}

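/* Fill in the virtio_net header for an skb and add it to the send queue. */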
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned num_sg;
	unsigned hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
	if (vi->mergeable_rx_bufs)
		hdr_len = sizeof hdr->mhdr;
	else
		hdr_len = sizeof hdr->hdr;

	can_push = vi->any_header_sg &&
		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	if (vi->mergeable_rx_bufs)
		hdr->mhdr.num_buffers = 0;

	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(sq->vq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out,
				 struct scatterlist *in)
{
	struct scatterlist *sgs[4], hdr, stat;
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned out_num = 0, in_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	ctrl.class = class;
	ctrl.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;
	if (in)
		sgs[out_num + in_num++] = in;

	/* Add return status. */
	sg_init_one(&stat, &status, sizeof(status));
	sgs[out_num + in_num++] = &stat;

	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
	       < 0);

	virtqueue_kick(vi->cvq);

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr = p;
	struct scatterlist sg;

	ret = eth_prepare_mac_addr_change(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
					  &sg, NULL)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			return -EINVAL;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  addr->sa_data, dev->addr_len);
	}

	eth_commit_mac_addr_change(dev, p);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));

		do {
			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct virtio_net_ctrl_mq s;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	s.virtqueue_pairs = queue_pairs;
	sg_init_one(&sg, &s, sizeof(s));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device is going up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++)
		napi_disable(&vi->rq[i].napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, NULL))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, -1);
			virtqueue_set_affinity(vi->sq[i].vq, -1);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int i;
	int cpu;

	/* In multiqueue mode, when the number of cpus equals the number of
	 * queue pairs, we let each queue pair be private to one cpu by
	 * setting the affinity hint to eliminate the contention.
	 */
	if (vi->curr_queue_pairs == 1 ||
	    vi->max_queue_pairs != num_online_cpus()) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	i = 0;
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
		i++;
	}

	vi->affinity_hint_set = true;
}

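/* CPU hotplug notifier: redo the affinity hints as CPUs come and go. */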
static int virtnet_cpu_callback(struct notifier_block *nfb,
			        unsigned long action, void *hcpu)
{
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_DEAD:
		virtnet_set_affinity(vi);
		break;
	case CPU_DOWN_PREPARE:
		virtnet_clean_affinity(vi, (long)hcpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void virtnet_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs)
		return -EINVAL;

	get_online_cpus();
	err = virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop   	     = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu	     = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

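/* Deferred config-change handler: process link announce requests and
 * propagate the link status read from the device's config space.
 */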
static void virtnet_config_changed_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, config_work);
	u16 v;

	mutex_lock(&vi->config_lock);
	if (!vi->config_enable)
		goto done;

	if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
			      offsetof(struct virtio_net_config, status),
			      &v) < 0)
		goto done;

	if (v & VIRTIO_NET_S_ANNOUNCE) {
		netdev_notify_peers(vi->dev);
		virtnet_ack_link_announce(vi);
	}

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		goto done;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_tx_wake_all_queues(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_tx_stop_all_queues(vi->dev);
	}
done:
	mutex_unlock(&vi->config_lock);
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	schedule_work(&vi->config_work);
}

static void virtnet_free_queues(struct virtnet_info *vi)
{
	kfree(vi->rq);
	kfree(vi->sq);
}

static void free_receive_bufs(struct virtnet_info *vi)
{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		while (vi->rq[i].pages)
			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
	}
}

static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;
		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
			dev_kfree_skb(buf);
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->rq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
			if (vi->big_packets)
				give_pages(&vi->rq[i], buf);
			else if (vi->mergeable_rx_bufs)
				put_page(virt_to_head_page(buf));
			else
				dev_kfree_skb(buf);
			--vi->rq[i].num;
		}
		BUG_ON(vi->rq[i].num != 0);
	}
}

static void virtnet_del_vqs(struct virtnet_info *vi)
{
	struct virtio_device *vdev = vi->vdev;

	virtnet_clean_affinity(vi, -1);

	vdev->config->del_vqs(vdev);

	virtnet_free_queues(vi);
}

static int virtnet_find_vqs(struct virtnet_info *vi)
{
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	const char **names;

	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
	 * possible control vq.
	 */
	total_vqs = vi->max_queue_pairs * 2 +
		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);

	/* Allocate space for find_vqs parameters */
	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
	if (!callbacks)
		goto err_callback;
	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
	if (!names)
		goto err_names;

	/* Parameters for control virtqueue, if any */
	if (vi->has_cvq) {
		callbacks[total_vqs - 1] = NULL;
		names[total_vqs - 1] = "control";
	}

	/* Allocate/initialize parameters for send/receive virtqueues */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		callbacks[rxq2vq(i)] = skb_recv_done;
		callbacks[txq2vq(i)] = skb_xmit_done;
		sprintf(vi->rq[i].name, "input.%d", i);
		sprintf(vi->sq[i].name, "output.%d", i);
		names[rxq2vq(i)] = vi->rq[i].name;
		names[txq2vq(i)] = vi->sq[i].name;
	}

	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
					 names);
	if (ret)
		goto err_find;

	if (vi->has_cvq) {
		vi->cvq = vqs[total_vqs - 1];
		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].vq = vqs[rxq2vq(i)];
		vi->sq[i].vq = vqs[txq2vq(i)];
	}

	kfree(names);
	kfree(callbacks);
	kfree(vqs);

	return 0;

err_find:
	kfree(names);
err_names:
	kfree(callbacks);
err_callback:
	kfree(vqs);
err_vq:
	return ret;
}

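/* Allocate per-queue-pair state and register the NAPI poll handlers. */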
static int virtnet_alloc_queues(struct virtnet_info *vi)
{
	int i;

	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->sq)
		goto err_sq;
	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
	if (!vi->rq)
		goto err_rq;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	for (i = 0; i < vi->max_queue_pairs; i++) {
		vi->rq[i].pages = NULL;
		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
			       napi_weight);

		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
	}

	return 0;

err_rq:
	kfree(vi->sq);
err_sq:
	return -ENOMEM;
}

static int init_vqs(struct virtnet_info *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtnet_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtnet_find_vqs(vi);
	if (ret)
		goto err_free;

	get_online_cpus();
	virtnet_set_affinity(vi);
	put_online_cpus();

	return 0;

err_free:
	virtnet_free_queues(vi);
err:
	return ret;
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int i, err;
	struct net_device *dev;
	struct virtnet_info *vi;
	u16 max_queue_pairs;

	/* Find if host supports multiqueue virtio_net device */
	err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
				offsetof(struct virtio_net_config,
				max_virtqueue_pairs), &max_queue_pairs);

	/* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len) < 0)
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	mutex_init(&vi->config_lock);
	vi->config_enable = true;
	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

	netif_set_real_num_tx_queues(dev, 1);
	netif_set_real_num_rx_queues(dev, 1);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].num == 0) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	virtnet_del_vqs(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);
	if (vi->alloc_frag.page)
		put_page(vi->alloc_frag.page);

	flush_work(&vi->config_work);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Prevent config work handler from accessing the device */
	mutex_lock(&vi->config_lock);
	vi->config_enable = false;
	mutex_unlock(&vi->config_lock);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}

	remove_vq_common(vi);

	flush_work(&vi->config_work);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);

	netif_device_attach(vi->dev);

	for (i = 0; i < vi->curr_queue_pairs; i++)
		if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
			schedule_delayed_work(&vi->refill, 0);

	mutex_lock(&vi->config_lock);
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM
	.freeze =	virtnet_freeze,
	.restore =	virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");