/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
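/* Usage example (assuming the module keeps its usual hv_netvsc name):
 *
 *   modprobe hv_netvsc ring_size=256
 *
 * netvsc_drv_init() below bumps any value under RING_SIZE_MIN back up and
 * logs the adjustment.
 */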

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}
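
/* ndo_set_rx_mode is invoked in atomic context (under the netdev address
 * lock), while reprogramming the RNDIS packet filter needs a sleeping
 * message exchange with the host; netvsc_set_multicast_list() therefore
 * only schedules the work item handled above.
 */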

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

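/* Reserve room for one more per-packet info (PPI) field: push the RNDIS
 * data offset out by ppi_size and append the new PPI header after any
 * already present; the caller fills in the payload it points to.
 */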
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
				int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
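/* Implementation sketch: on a little-endian host, subk.ka overlays the top
 * 32 bits of subk.k and subk.kb the byte just below it, so "subk.k <<= 1"
 * slides a 32-bit window across the key one bit at a time; every set data
 * bit XORs the window at that position into the result, which is the
 * Toeplitz definition.
 */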
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}

static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect(skb, &flow) ||
	    !(flow.n_proto == htons(ETH_P_IP) ||
	      flow.n_proto == htons(ETH_P_IPV6)))
		return false;

	/* Hash the source/destination addresses plus the TCP ports (the
	 * first 12 bytes of struct flow_keys) for TCP, or the addresses
	 * only (8 bytes) for everything else.
	 */
	if (flow.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev =  net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;
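
	/* The host publishes an indirection table (send_table, filled in
	 * by the RNDIS code); hashing the flow and indexing that table
	 * keeps a flow pinned to a single transmit queue.
	 */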

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}

static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}
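
	/* Worked example with 4K pages: a 200-byte buffer at page offset
	 * 4000 spans two pages (96 + 104 bytes), fills pb[0] and pb[1],
	 * and j + 1 == 2 entries are reported to the caller.
	 */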

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					offset_in_page(hdr),
					len, &pb[slots_used]);

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* As in fill_pg_buf(), ignore the whole unused pages at the
		 * start of a compound page and count only the pages the
		 * fragment actually spans.
		 */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
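
/* Example: a linear area of 100 bytes starting 50 bytes before a 4K page
 * boundary has offset 4046, so DIV_ROUND_UP(4046 + 100, 4096) = 2 slots;
 * count_skb_frag_slots() applies the same arithmetic per fragment.
 */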

static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
		(eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
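
/* The TRANSPORT_INFO_* values (from hyperv_net.h) pack the L3 protocol
 * into the upper 16 bits and the L4 protocol into the lower 16, which is
 * why the transmit path below can test (INFO_IPV4 << 16) and INFO_TCP
 * independently.
 */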

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length = skb->len;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		netdev_err(net, "Packet too big: %u\n", skb->len);
		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_message) +
			 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
			 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
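
	/* The single allocation above holds, in order: the hv_netvsc_packet
	 * itself, the hv_page_buffer array (num_data_pgs entries), and the
	 * RNDIS message with room for every PPI we might append;
	 * packet->rndis_msg below points at that tail.
	 */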

	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				(num_data_pgs * sizeof(struct hv_page_buffer)));

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
					IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
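	/* For GSO packets the host does the segmentation (LSOv2); we hand it
	 * the MSS and seed the TCP checksum with the pseudo-header checksum
	 * (computed with a zero length), the usual TSO convention.
	 */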
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
		~csum_tcpudp_magic(ip_hdr(skb)->saddr,
				   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					skb, &packet->page_buf[0]);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb_length;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	/* -EAGAIN means the send ring was full; NETDEV_TX_BUSY asks the
	 * stack to requeue the skb and retry.
	 */
	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	switch (indicate->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		rdev->link_state = false;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		rdev->link_state = true;
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		rdev->link_change = true;
		break;
	default:
		return;
	}

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (!rdev->link_state) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed
	 * to by hv_netvsc_packet cannot be deallocated.
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
		packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev =  ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
	if (mtu < ETH_DATA_LEN || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}


static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev =  ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously, we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}


static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct  hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);