/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path between the synthetic interface and the VF
 * interface, as directed by the @vf flag.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt,
			       VM_PKT_DATA_INBAND, 0);
}

/* Worker to set up subchannels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and cannot wait for the channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

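/* Allocate a netvsc_device and initialize the parts that do not depend
 * on the channel: wait queues, completion, and the subchannel worker.
 * The receive/send buffers are set up later in netvsc_init_buf().
 */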
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.slots);

	kfree(nvdev);
}

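/* Defer freeing through call_rcu() so datapath readers that look up
 * the device under rcu_read_lock() can never see freed memory.
 */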
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg) and therefore need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				"revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) and therefore need to
	 * send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

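/* Allocate the per-channel receive completion ring, preferring memory
 * on the NUMA node of the channel's target CPU and falling back to any
 * node if that fails.
 */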
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

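/* Allocate the receive and send buffers, establish a GPADL handle for
 * each with the host, and parse the host's responses to learn the
 * section sizes and counts.
 */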
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Setup receive completion ring */
	net_device->recv_completion_cnt
		= round_up(net_device->recv_section_cnt + 1,
			   PAGE_SIZE / sizeof(u64));
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);

	return ret;
}

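/* Negotiate the highest NVSP protocol version supported by both ends,
 * send the NDIS version, then set up the receive and send buffers.
 */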
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i])  == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;


	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* And disassociate NAPI context from device */
	for (i = 0; i < net_device->num_chn; i++)
		netif_napi_del(&net_device->chan_table[i].napi);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

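/* Transmit flow control, as a percentage of outbound ring space:
 * stop the queue when available space drops below the low watermark,
 * wake it once space recovers above the high watermark.
 */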
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

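/* Handle a transmit completion: release the send-buffer section,
 * update stats, free the skb, and wake the queue if it was stopped
 * and ring space has recovered.
 */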
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

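/* Completions for channel-init messages are copied back and complete
 * channel_init_wait; RNDIS packet completions go through
 * netvsc_send_tx_complete().
 */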
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

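/* Find and atomically claim a free send-buffer section; returns
 * NETVSC_INVALID_INDEX when all sections are in use.
 */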
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

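/* Copy the packet fragments into the chosen send-buffer section,
 * zero-padding the tail to the device's packet alignment when more
 * packets are expected to follow.
 */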
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

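/* Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message and hand it to the
 * VMBus channel, either with page buffers attached or inband, and
 * apply transmit flow control based on the result.
 */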
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch =  msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode (partial copy) send, and the
	 * transmit queue is not flow blocked.
	 */
	xmit_more = skb->xmit_more &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	}  __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

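/* Handle a transfer-page packet: each range is one RNDIS message in
 * the receive buffer; returns the number of ranges processed.
 */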
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct vmbus_channel *channel,
			  const struct vmpacket_descriptor *desc,
			  const struct nvsp_message *nvsp)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset + buflen > net_device->recv_buf_size)) {
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		data = recv_buf + offset;

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   channel, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS))
			status = NVSP_STAT_FAIL;
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

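/* Host supplied a new send indirection table; copy it into tx_table
 * for queue selection on transmit.
 */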
static void netvsc_send_table(struct net_device *ndev,
			      const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, *tab;
	int i;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

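/* Host notification that a VF (SR-IOV) slot was allocated or removed;
 * record the serial and allocation state.
 */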
static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

static  void netvsc_receive_inband(struct net_device *ndev,
				   const struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(ndev, nvmsg);
		break;
	}
}

static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct vmbus_channel *channel,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel,
				       desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, channel,
				      desc, nvmsg);
		break;

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, channel, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 *  and not doing busy poll
	 * then re-enable host interrupts
	 *  and reschedule if ring is not empty
	 *   or sending receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi,
		       netvsc_poll, NAPI_POLL_WEIGHT);

	/* Open the channel */
	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes,  NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}