/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

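/* Per-cpu transmit/receive counters; syncp lets readers fetch a
 * consistent snapshot of the 64-bit values on 32-bit machines. */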
struct virtnet_stats {
	struct u64_stats_sync syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Active statistics */
	struct virtnet_stats __percpu *stats;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* fragments + linear part + virtio header */
	struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
	struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares the same page with this
	 * header sg.  This padding makes the next sg 16 byte aligned after
	 * virtio_net_hdr.
	 */
	char padding[6];
};

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the front for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

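/* Attach one page (at most PAGE_SIZE - offset bytes) to skb as a
 * fragment and subtract the consumed bytes from *len. */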
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int size = min((unsigned)PAGE_SIZE - offset, *len);
	int i = skb_shinfo(skb)->nr_frags;

	__skb_fill_page_desc(skb, i, page, offset, size);

	skb->data_len += size;
	skb->len += size;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
	*len -= size;
}

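/* Build an skb from a chain of pages: copy the virtio header and up to
 * GOOD_COPY_LEN bytes into the linear area, attach any remaining data
 * as page fragments, and return unused pages to vi->pages. */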
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		if (net_ratelimit())
			pr_debug("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

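/* For mergeable rx buffers: fetch the remaining num_buffers - 1 pages
 * of this packet from the receive queue and append them as fragments. */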
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	int num_buf, i, len;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		page = virtqueue_get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

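/* Process one completed receive buffer: build the skb, apply checksum
 * and GSO metadata from the virtio header, then pass it up the stack. */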
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs)
			if (receive_mergeable(vi, skb)) {
				dev_kfree_skb(skb);
				return;
			}
	}

	hdr = skb_vnet_hdr(skb);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += skb->len;
	stats->rx_packets++;
	u64_stats_update_end(&stats->syncp);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

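/* Post a single MAX_PACKET_LEN skb to the receive queue: rx_sg[0]
 * carries the virtio header, rx_sg[1] the packet data. */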
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

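/* Post a MAX_SKB_FRAGS + 2 entry buffer backed by a chain of pages, for
 * hosts that send us large (GSO) packets without mergeable buffers. */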
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separate vi->rx_sg[0] for virtio_net_hdr only, due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				    first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

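/* With mergeable rx buffers the host consumes one page per descriptor,
 * so each posted receive buffer is just a single page. */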
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		oom = err == -ENOMEM;
		if (err < 0)
			break;
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	virtqueue_kick(vi->rvq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI, suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we enabled
	 * NAPI, we won't get another interrupt, so process any outstanding
	 * packets now.  virtnet_poll wants to re-enable the queue, so we
	 * disable it here.  We synchronize against interrupts via
	 * NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	virtnet_napi_enable(vi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

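/* NAPI poll: drain up to @budget received buffers, refill the ring once
 * it falls below half full, and carefully re-enable callbacks so that a
 * buffer arriving after napi_complete() is not missed. */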
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			virtqueue_disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

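/* Reap transmitted skbs from the send queue, updating per-cpu stats;
 * returns the total number of sg entries freed. */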
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;
	struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);

	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);

		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

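/* Fill in the virtio header from the skb's checksum/GSO state and add
 * header + data to the send queue; returns the queue's remaining
 * capacity, or a negative errno on failure. */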
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb_checksum_start_offset(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
				 0, skb);
}

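/* ndo_start_xmit: reap old buffers, queue this skb, and stop the queue
 * when fewer than 2 + MAX_SKB_FRAGS descriptors remain. */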
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		if (net_ratelimit()) {
			if (likely(capacity == -ENOMEM)) {
				dev_warn(&dev->dev,
					 "TX queue failure: out of memory\n");
			} else {
				dev->stats.tx_fifo_errors++;
				dev_warn(&dev->dev,
					 "Unexpected TX queue failure: %d\n",
					 capacity);
			}
		}
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	virtqueue_kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				virtqueue_disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats __percpu *stats
			= per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	virtnet_napi_enable(vi);
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);

	virtqueue_kick(vi->cvq);

	/*
	 * Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_get_stats64     = virtnet_stats,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

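/* Probe: allocate the net_device, negotiate offload features with the
 * host, set up the two (or three, with a control queue) virtqueues,
 * register the device and prefill the receive ring. */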
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

	INIT_DELAYED_WORK(&vi->refill, refill_work);
	sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
	sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free_stats;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		virtnet_update_status(vi);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

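/* Return any buffers still sitting in the virtqueues to the system;
 * called at remove time, after the device has been reset. */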
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = virtqueue_detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");