/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * alloc_net_device - Allocate and initialize a netvsc_device for @device.
 *
 * Allocates the per-device state plus the channel callback buffer, seeds
 * default RNDIS packing parameters, and stores the new object as the
 * hv_device's driver data.  Returns NULL on any allocation failure (the
 * partially built object is freed before returning).
 */
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	/* Buffer used by the channel callback to read incoming packets */
	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		/* undo the first allocation so nothing leaks */
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;
	/* Defaults until negotiation/RNDIS tells us otherwise */
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	/* Note: drvdata held the net_device on entry; it now holds us */
	hv_set_drvdata(device, net_device);
	return net_device;
}

64 65 66 67 68 69
/* Release a netvsc_device allocated by alloc_net_device(). */
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

70
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
71
{
72
	struct netvsc_device *net_device;
73

74
	net_device = hv_get_drvdata(device);
75
	if (net_device && net_device->destroy)
76
		net_device = NULL;
77

78
	return net_device;
79 80
}

81
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
82
{
83
	struct netvsc_device *net_device;
84

85
	net_device = hv_get_drvdata(device);
86 87 88 89 90 91

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
		atomic_read(&net_device->num_outstanding_sends) == 0)
92
		net_device = NULL;
93

94
get_in_err:
95
	return net_device;
96 97 98
}


99
/*
 * netvsc_destroy_buf - Tear down the receive and send buffers shared with
 * the host.
 *
 * For each buffer the ordering is protocol-mandated: first revoke the
 * buffer from the host (only if the host ever acknowledged it), then tear
 * down the GPADL mapping, then free the guest memory.  On any failure we
 * return immediately, deliberately leaking the remaining resources rather
 * than freeing memory the host may still reference (which would bugcheck
 * the host).  Returns 0 on success or the first vmbus error encountered.
 */
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				"revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
			   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	/* Section metadata copied from the host's SendReceiveBufferComplete */
	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a  send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	/* kfree(NULL) is a no-op, so this is safe even if init failed early */
	kfree(net_device->send_section_map);

	return ret;
}

220
/*
 * netvsc_init_buf - Allocate the receive and send buffers and share them
 * with the host.
 *
 * For each buffer: allocate guest memory (preferring the channel's NUMA
 * node), establish a GPADL so the host can map it, notify the host via an
 * NVSP message, wait for the host's completion, then parse the section
 * layout out of the response.  Also allocates the send-section bitmap used
 * to hand out send-buffer slots.  Any failure unwinds everything through
 * netvsc_destroy_buf().  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): recv_buf_size/send_buf_size must already be set by the
 * caller (netvsc_connect_vsp does this) — not validated here.
 */
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	unsigned long t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	/* Prefer the NUMA node of the channel's CPU; fall back to any node */
	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			"buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}


	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			"unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* The completion handler copies the host's reply into
	 * channel_init_pkt (== init_packet) before signalling us.
	 * NOTE(review): BUG_ON on timeout crashes the guest if the host
	 * never answers — consider returning an error instead.
	 */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);


	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */

	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	/* Keep our own copy of the host-provided section table */
	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer.
	 */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* NOTE(review): same BUG_ON-on-timeout concern as above */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size/net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	/* One bit per send section; scanned by netvsc_get_next_send_section */
	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}


416 417 418 419 420
/*
 * negotiate_nvsp_ver - Try to negotiate exactly one NVSP protocol version.
 *
 * Sends an INIT message with min == max == @nvsp_ver, waits (up to 5s) for
 * the host's reply (delivered into @init_packet by the channel completion
 * handler), and for NVSPv2+ follows up with the NDIS config message (MTU,
 * 802.1q; SR-IOV capability from v5 on).  Returns 0 if the host accepted
 * the version, -ETIMEDOUT / -EINVAL / vmbus error otherwise.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret;
	unsigned long t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	/* Offer a single version: min == max == nvsp_ver */
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Reply is copied into init_packet before this completes */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	/* NVSPv1 has no NDIS-config step */
	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
						       ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

	/* Fire-and-forget: no completion requested for the config message */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
477 478 479
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */
480 481 482 483 484 485 486 487 488

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
489 490 491 492 493 494 495 496
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i])  == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
497
		ret = -EPROTO;
498
		goto cleanup;
499
	}
500 501 502

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

503
	/* Send the ndis version */
504
	memset(init_packet, 0, sizeof(struct nvsp_message));
505

506
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
507
		ndis_version = 0x00060001;
508 509
	else
		ndis_version = 0x0006001e;
510

511 512 513
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
514
				(ndis_version & 0xFFFF0000) >> 16;
515 516
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
517
				ndis_version & 0xFFFF;
518

519
	/* Send the init request */
520
	ret = vmbus_sendpacket(device->channel, init_packet,
521 522 523
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
524
	if (ret != 0)
525
		goto cleanup;
526 527

	/* Post the big receive buffer to NetVSP */
528 529 530 531
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
532
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
533

534
	ret = netvsc_init_buf(device);
535

536
cleanup:
537 538 539
	return ret;
}

540
/* Disconnect from the host: revoke and free the shared buffers. */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

545
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Teardown order matters: revoke the shared buffers, clear the drvdata
 * pointer under the channel's inbound lock (so the channel callback can
 * no longer find the device), then close the channel and free everything.
 * Returns 0.
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	/* Lock against the channel callback reading drvdata concurrently */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}

584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601

/* Tx queue is woken above HIWATER and stopped below LOWATER (percent free) */
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

602 603 604 605 606 607
/*
 * Return a send-buffer section to the pool by flipping its bitmap bit.
 * The bit at @index is set while in use, so the atomic toggle clears it.
 */
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

608
/*
 * netvsc_send_completion - Handle a completion packet from the host.
 *
 * Two classes of completion are demultiplexed on the NVSP message type:
 *  - init/handshake replies (INIT, recv-buf, send-buf, subchannel): the
 *    message is copied into channel_init_pkt and the waiter in
 *    netvsc_init_buf()/negotiate_nvsp_ver() is signalled;
 *  - RNDIS packet completions: release the send-buffer slot, free the
 *    skb stashed in trans_id, decrement the outstanding-send counters,
 *    wake any drain waiter, and restart the tx queue if the ring has
 *    drained past the high-water mark.
 */
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;
	struct sk_buff *skb;

	ndev = net_device->ndev;

	/* The NVSP payload follows the vmbus descriptor (offset8 is in
	 * units of 8 bytes)
	 */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;

		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *) skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);

		/* Last outstanding send on a dying device: release the
		 * waiter in the destroy path
		 */
		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		/* Restart a stopped queue once there is headroom again */
		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}

}

677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
/*
 * netvsc_get_next_send_section - Claim a free send-buffer section.
 *
 * Lock-free scan of the section bitmap: find a word with a zero bit, try
 * to atomically set it, and retry the next word if someone else won the
 * race.  Returns the section index, or NETVSC_INVALID_INDEX when all
 * sections are busy.  A bit set beyond section_cnt (possible only in the
 * last partial word) is abandoned without being cleared — harmless, since
 * those out-of-range bits are never valid sections anyway.
 */
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		/* Word fully allocated (all ones): skip it */
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		/* Atomically claim the bit; lost race => try again */
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

L
Lad, Prabhakar 已提交
702 703
/*
 * netvsc_copy_to_send_buf - Copy a packet's pages into a send-buffer
 * section at offset @pend_size (for batching after pending data).
 *
 * For partial-copy packets only the first rmsg_pgcnt page buffers (the
 * RNDIS header part) are copied.  When batching (xmit_more) a full copy,
 * the data is zero-padded up to pkt_align, and both the RNDIS message
 * length and packet->total_data_buflen are grown to match — i.e. this
 * function mutates @packet and @rndis_msg as a side effect.  Returns the
 * number of bytes written into the send buffer.
 */
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (packet->is_data_pkt && packet->xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	/* Gather the page buffers into the contiguous section */
	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

745 746
/*
 * netvsc_send_pkt - Put one packet on the channel's outbound ring.
 *
 * Builds the NVSP SEND_RNDIS_PKT message (pointing at a send-buffer
 * section if the data was copied there, or at the page buffers otherwise)
 * and sends it, stashing the skb pointer in the transaction id so the
 * completion handler can free it.  Maintains the outstanding-send counters
 * and stops/wakes the tx queue based on ring headroom.  Returns 0, -ENODEV
 * if the channel is rescinded, -ENOSPC when the ring is full and the queue
 * was stopped, or another vmbus error.
 */
static inline int netvsc_send_pkt(
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct net_device *ndev = net_device->ndev;
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	/* trans_id round-trips back to us in netvsc_send_completion */
	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		packet->xmit_more = false;

	if (packet->page_buf_cnt) {
		/* Partial copy: skip the page buffers already copied into
		 * the send-buffer section
		 */
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !packet->xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !packet->xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			/* Completion already drained everything: re-wake
			 * so the queue cannot stay stopped forever
			 */
			if (atomic_read(&net_device->
				queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(
					    ndev, q_idx));
			/* No completion will come to wake us: report
			 * hard failure instead of retry
			 */
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/*
 * netvsc_send - Transmit a packet, batching small sends where possible.
 *
 * Small data packets are copied into a shared send-buffer section and, when
 * xmit_more indicates further packets are coming, held back in the
 * per-queue multi_send_data (msd) state so several packets go to the host
 * in one section.  Three cases are tried in order: append a full copy to
 * the pending batch, append a partial copy (RNDIS header only), or start a
 * fresh section.  Any previously pending packet displaced by the decision
 * (msd_send) is flushed first, then the current packet (cur_send) if it is
 * not being held back.  Returns 0 on success or a netvsc_send_pkt() error;
 * the claimed section is released on failure.
 */
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	bool try_batch;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];

	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	/* Case 1: full copy fits after the pending data in the same section */
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	/* Case 2: only the RNDIS header fits; rest goes by page buffers */
	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	/* Case 3: start a new section; pending batch must be flushed */
	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
				msd_send = msdp->pkt;
				msdp->pkt = NULL;
				msdp->count = 0;
				msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			/* Header was copied; send only the data pages */
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			/* Everything copied; no page buffers needed */
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt)
			dev_kfree_skb_any(skb);

		if (packet->xmit_more && !packet->cp_partial) {
			/* Hold back: more packets coming for this queue */
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		/* No section available: flush pending batch, send directly */
		msd_send = msdp->pkt;
		msdp->pkt = NULL;
		msdp->count = 0;
		cur_send = packet;
	}

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

946
/*
 * netvsc_send_recv_completion - Acknowledge a received RNDIS packet to the
 * host.
 *
 * Sends a SEND_RNDIS_PKT_COMPLETE with @status for @transaction_id.  If
 * the ring is momentarily full (-EAGAIN) the send is retried up to three
 * times with a 100us delay; after that, and for any other error, the
 * failure is logged and the completion is dropped.
 */
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev;

	ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			" (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				"completion pkt (tid %llx)...give up retrying\n",
				transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			"completion pkt - %llx\n", transaction_id);
	}
}

991
static void netvsc_receive(struct netvsc_device *net_device,
992
			struct vmbus_channel *channel,
993 994
			struct hv_device *device,
			struct vmpacket_descriptor *packet)
995
{
996 997
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
998 999 1000
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
1001 1002
	int i;
	int count = 0;
1003
	struct net_device *ndev;
1004
	void *data;
1005

1006
	ndev = net_device->ndev;
1007

1008 1009 1010 1011
	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
1012
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
1013
		netdev_err(ndev, "Unknown packet type received - %d\n",
1014
			   packet->type);
1015 1016 1017
		return;
	}

1018
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
1019
			(packet->offset8 << 3));
1020

1021
	/* Make sure this is a valid nvsp packet */
1022 1023
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
1024
		netdev_err(ndev, "Unknown nvsp packet type received-"
1025
			" %d\n", nvsp_packet->hdr.msg_type);
1026 1027 1028
		return;
	}

1029
	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
1030

1031
	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
1032
		netdev_err(ndev, "Invalid xfer page set id - "
1033
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
1034
			   vmxferpage_packet->xfer_pageset_id);
1035 1036 1037
		return;
	}

1038
	count = vmxferpage_packet->range_cnt;
1039

1040
	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1041
	for (i = 0; i < count; i++) {
1042
		/* Initialize the netvsc packet */
1043
		netvsc_packet->status = NVSP_STAT_SUCCESS;
1044
		data = (void *)((unsigned long)net_device->
1045
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
1046
		netvsc_packet->total_data_buflen =
1047
					vmxferpage_packet->ranges[i].byte_count;
1048

1049
		/* Pass it to the upper layer */
1050
		rndis_filter_receive(device, netvsc_packet, &data, channel);
1051

1052 1053
		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;
1054 1055
	}

1056 1057
	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
1058 1059
}


static void netvsc_send_table(struct hv_device *hdev,
1062
			      struct nvsp_message *nvmsg)
1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

/*
 * netvsc_send_vf - record the host's VF association message.
 *
 * Caches the allocation flag and serial number carried in an
 * NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION message on the netvsc device.
 */
static void netvsc_send_vf(struct netvsc_device *nvdev,
			   struct nvsp_message *nvmsg)
{
	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct netvsc_device *nvdev,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(nvdev, nvmsg);
		break;
	}
}

void netvsc_channel_cb(void *context)
1110
{
1111
	int ret;
1112 1113
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
1114 1115 1116
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
1117
	struct vmpacket_descriptor *desc;
1118 1119
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
1120
	struct net_device *ndev;
1121
	struct nvsp_message *nvmsg;
1122

1123 1124 1125 1126 1127
	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

1128
	net_device = get_inbound_net_device(device);
1129
	if (!net_device)
1130
		return;
1131
	ndev = net_device->ndev;
1132
	buffer = get_per_channel_state(channel);
1133

1134
	do {
1135
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
1136
					   &bytes_recvd, &request_id);
1137
		if (ret == 0) {
1138
			if (bytes_recvd > 0) {
1139
				desc = (struct vmpacket_descriptor *)buffer;
1140 1141
				nvmsg = (struct nvsp_message *)((unsigned long)
					 desc + (desc->offset8 << 3));
1142 1143
				switch (desc->type) {
				case VM_PKT_COMP:
1144
					netvsc_send_completion(net_device,
1145
								channel,
1146
								device, desc);
1147 1148
					break;

1149
				case VM_PKT_DATA_USING_XFER_PAGES:
1150 1151 1152 1153 1154
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
1155 1156 1157
					netvsc_receive_inband(device,
							      net_device,
							      nvmsg);
1158 1159 1160
					break;

				default:
1161
					netdev_err(ndev,
1162 1163
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
1164
						   desc->type, request_id,
1165
						   bytes_recvd);
1166
					break;
1167 1168
				}

1169
			} else {
1170 1171 1172
				/*
				 * We are done for this pass.
				 */
1173 1174
				break;
			}
1175

1176
		} else if (ret == -ENOBUFS) {
1177 1178
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
1179
			/* Handle large packet */
1180
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1181
			if (buffer == NULL) {
1182
				/* Try again next time around */
1183
				netdev_err(ndev,
1184
					   "unable to allocate buffer of size "
1185
					   "(%d)!!\n", bytes_recvd);
1186 1187 1188
				break;
			}

1189
			bufferlen = bytes_recvd;
1190 1191 1192
		}
	} while (1);

1193 1194
	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
1195 1196
	return;
}
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
1202
int netvsc_device_add(struct hv_device *device, void *additional_info)
1203 1204
{
	int ret = 0;
1205 1206
	int ring_size =
	((struct netvsc_device_info *)additional_info)->ring_size;
1207
	struct netvsc_device *net_device;
1208
	struct net_device *ndev;
1209 1210

	net_device = alloc_net_device(device);
1211 1212
	if (!net_device)
		return -ENOMEM;
1213

1214 1215
	net_device->ring_size = ring_size;

1216 1217 1218 1219 1220 1221 1222 1223 1224
	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

1225 1226 1227
	/* Add netvsc_device context to netvsc_device */
	net_device->nd_ctx = netdev_priv(ndev);

1228
	/* Initialize the NetVSC channel extension */
1229
	init_completion(&net_device->channel_init_wait);
1230

1231 1232
	set_per_channel_state(device->channel, net_device->cb_buffer);

1233
	/* Open the channel */
1234 1235
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
1236
			 netvsc_channel_cb, device->channel);
1237 1238

	if (ret != 0) {
1239
		netdev_err(ndev, "unable to open channel: %d\n", ret);
1240 1241 1242 1243
		goto cleanup;
	}

	/* Channel is opened */
1244
	pr_info("hv_netvsc channel opened successfully\n");
1245

1246 1247
	net_device->chn_table[0] = device->channel;

1248 1249 1250
	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
1251
		netdev_err(ndev,
1252
			"unable to connect to NetVSP - %d\n", ret);
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
1263
	free_netvsc_device(net_device);
1264 1265 1266

	return ret;
}