/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
20 21
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
35 36


37
static struct netvsc_device *alloc_net_device(struct hv_device *device)
38
{
39
	struct netvsc_device *net_device;
40
	struct net_device *ndev = hv_get_drvdata(device);
41
	int i;
42

43 44
	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
45 46
		return NULL;

47 48 49 50 51 52
	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

53
	init_waitqueue_head(&net_device->wait_drain);
54
	net_device->start_remove = false;
55
	net_device->destroy = false;
56
	net_device->dev = device;
57
	net_device->ndev = ndev;
58 59 60 61 62
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	for (i = 0; i < num_online_cpus(); i++)
		spin_lock_init(&net_device->msd[i].lock);
63

64
	hv_set_drvdata(device, net_device);
65
	return net_device;
66 67
}

68 69 70 71 72 73
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

74
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
75
{
76
	struct netvsc_device *net_device;
77

78
	net_device = hv_get_drvdata(device);
79
	if (net_device && net_device->destroy)
80
		net_device = NULL;
81

82
	return net_device;
83 84
}

85
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
86
{
87
	struct netvsc_device *net_device;
88

89
	net_device = hv_get_drvdata(device);
90 91 92 93 94 95

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
		atomic_read(&net_device->num_outstanding_sends) == 0)
96
		net_device = NULL;
97

98
get_in_err:
99
	return net_device;
100 101 102
}


103
static int netvsc_destroy_buf(struct netvsc_device *net_device)
104 105 106
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
107
	struct net_device *ndev = net_device->ndev;
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
135
			netdev_err(ndev, "unable to send "
136
				"revoke receive buffer to netvsp\n");
137
			return ret;
138 139 140 141 142 143 144 145 146 147 148 149
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
			   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
150
			netdev_err(ndev,
151
				   "unable to teardown receive buffer's gpadl\n");
152
			return ret;
153 154 155 156 157 158
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
159
		vfree(net_device->recv_buf);
160 161 162 163 164 165 166 167 168
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

169 170
	/* Deal with the send buffer we may have setup.
	 * If we got a  send section size, it means we received a
171 172
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
173 174 175 176 177 178 179 180 181
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
182 183
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;
184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
212
		net_device->send_buf_gpadl_handle = 0;
213 214
	}
	if (net_device->send_buf) {
215
		/* Free up the send buffer */
216
		vfree(net_device->send_buf);
217 218 219 220
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

221 222 223
	return ret;
}

224
static int netvsc_init_buf(struct hv_device *device)
225
{
226
	int ret = 0;
227
	unsigned long t;
228 229
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
230
	struct net_device *ndev;
231
	int node;
232

233
	net_device = get_outbound_net_device(device);
234
	if (!net_device)
235
		return -ENODEV;
236
	ndev = net_device->ndev;
237

238 239 240 241 242
	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

243
	if (!net_device->recv_buf) {
244
		netdev_err(ndev, "unable to allocate receive "
245
			"buffer of size %d\n", net_device->recv_buf_size);
246
		ret = -ENOMEM;
247
		goto cleanup;
248 249
	}

250 251 252 253 254
	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
255 256 257
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
258
	if (ret != 0) {
259
		netdev_err(ndev,
260
			"unable to establish receive buffer's gpadl\n");
261
		goto cleanup;
262 263 264
	}


265
	/* Notify the NetVsp of the gpadl handle */
266
	init_packet = &net_device->channel_init_pkt;
267

268
	memset(init_packet, 0, sizeof(struct nvsp_message));
269

270 271 272 273 274
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
275

276
	/* Send the gpadl notification request */
277
	ret = vmbus_sendpacket(device->channel, init_packet,
278
			       sizeof(struct nvsp_message),
279
			       (unsigned long)init_packet,
280
			       VM_PKT_DATA_INBAND,
281
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
282
	if (ret != 0) {
283
		netdev_err(ndev,
284
			"unable to send receive buffer's gpadl to netvsp\n");
285
		goto cleanup;
286 287
	}

288
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
289
	BUG_ON(t == 0);
290

291

292
	/* Check the response */
293 294
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
295
		netdev_err(ndev, "Unable to complete receive buffer "
296
			   "initialization with NetVsp - status %d\n",
297 298
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
299
		ret = -EINVAL;
300
		goto cleanup;
301 302
	}

303
	/* Parse the response */
304

305 306
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;
307

308 309 310 311 312
	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
313
	if (net_device->recv_section == NULL) {
314
		ret = -EINVAL;
315
		goto cleanup;
316 317
	}

318 319 320 321
	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
322 323
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
324
		ret = -EINVAL;
325
		goto cleanup;
326 327
	}

328 329
	/* Now setup the send buffer.
	 */
330 331 332
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
357
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
358
		net_device->send_buf_gpadl_handle;
359
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
382
			   send_send_buf_complete.status);
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size.
	 */
	net_device->send_section_cnt =
		net_device->send_buf_size/net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
405 406
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
407
		goto cleanup;
408
	}
409

410
	goto exit;
411

412
cleanup:
413
	netvsc_destroy_buf(net_device);
414

415
exit:
416 417 418 419
	return ret;
}


420 421 422 423 424
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
425
{
426 427
	int ret;
	unsigned long t;
428

429
	memset(init_packet, 0, sizeof(struct nvsp_message));
430
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
431 432
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
433

434
	/* Send the init request */
435
	ret = vmbus_sendpacket(device->channel, init_packet,
436
			       sizeof(struct nvsp_message),
437
			       (unsigned long)init_packet,
438
			       VM_PKT_DATA_INBAND,
439
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
440

441
	if (ret != 0)
442
		return ret;
443

444
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
445

446 447
	if (t == 0)
		return -ETIMEDOUT;
448

449
	if (init_packet->msg.init_msg.init_complete.status !=
450 451
	    NVSP_STAT_SUCCESS)
		return -EINVAL;
452

453
	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
454 455
		return 0;

456
	/* NVSPv2 or later: Send NDIS config */
457 458
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
459 460
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
						       ETH_HLEN;
461
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
462

463 464 465
	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

466 467 468 469 470 471 472 473 474 475 476 477 478 479 480
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);

	return ret;
}

static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
481 482 483
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */
484 485 486 487 488 489 490 491 492

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
493 494 495 496 497 498 499 500
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i])  == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
501
		ret = -EPROTO;
502
		goto cleanup;
503
	}
504 505 506

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

507
	/* Send the ndis version */
508
	memset(init_packet, 0, sizeof(struct nvsp_message));
509

510
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
511
		ndis_version = 0x00060001;
512 513
	else
		ndis_version = 0x0006001e;
514

515 516 517
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
518
				(ndis_version & 0xFFFF0000) >> 16;
519 520
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
521
				ndis_version & 0xFFFF;
522

523
	/* Send the init request */
524
	ret = vmbus_sendpacket(device->channel, init_packet,
525 526 527
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
528
	if (ret != 0)
529
		goto cleanup;
530 531

	/* Post the big receive buffer to NetVSP */
532 533 534 535
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
536
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
537

538
	ret = netvsc_init_buf(device);
539

540
cleanup:
541 542 543
	return ret;
}

/*
 * netvsc_disconnect_vsp() - tear down the NVSP connection.
 *
 * Revokes and frees the shared receive/send buffers; the channel itself
 * is closed later by netvsc_device_remove().
 */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

549
/*
550
 * netvsc_device_remove - Callback when the root bus device is removed
551
 */
552
int netvsc_device_remove(struct hv_device *device)
553
{
554
	struct netvsc_device *net_device;
555
	unsigned long flags;
556

557
	net_device = hv_get_drvdata(device);
558

559
	netvsc_disconnect_vsp(net_device);
560

561
	/*
562 563 564 565 566
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
567
	 */
568 569

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
570
	hv_set_drvdata(device, NULL);
571
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
572

573 574 575 576
	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
577
	dev_notice(&device->device, "net device safe to remove\n");
578

579
	/* Now, we can close the channel safely */
580
	vmbus_close(device->channel);
581

582
	/* Release all resources */
583
	vfree(net_device->sub_cb_buf);
584
	free_netvsc_device(net_device);
585
	return 0;
586 587
}

588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

606 607 608 609 610 611
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

612
static void netvsc_send_completion(struct netvsc_device *net_device,
613
				   struct vmbus_channel *incoming_channel,
614
				   struct hv_device *device,
615
				   struct vmpacket_descriptor *packet)
616
{
617 618
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
619
	struct net_device *ndev;
620
	u32 send_index;
621

622
	ndev = net_device->ndev;
623

624
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
625
			(packet->offset8 << 3));
626

627 628 629 630
	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
631 632 633
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
634
		/* Copy the response back */
635
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
636
		       sizeof(struct nvsp_message));
637
		complete(&net_device->channel_init_wait);
638 639
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
640
		int num_outstanding_sends;
641 642 643
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;
644

645
		/* Get the send context */
646
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
647
			packet->trans_id;
648

649
		/* Notify the layer above us */
650
		if (nvsc_packet) {
651 652 653
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
654
			q_idx = nvsc_packet->q_idx;
655
			channel = incoming_channel;
656
			netvsc_xmit_completion(nvsc_packet);
657
		}
658

659 660
		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
661 662
		queue_sends = atomic_dec_return(&net_device->
						queue_sends[q_idx]);
663

664 665 666
		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

667 668 669 670 671 672
		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
				netif_tx_wake_queue(netdev_get_tx_queue(
						    ndev, q_idx));
673
	} else {
674
		netdev_err(ndev, "Unknown send completion packet type- "
675
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
676 677 678 679
	}

}

680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

L
Lad, Prabhakar 已提交
705 706
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
707
				   u32 pend_size,
708 709
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg)
710 711
{
	char *start = net_device->send_buf;
712 713
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
714 715
	int i;
	u32 msg_size = 0;
716 717
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
718 719
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
720 721

	/* Add padding */
722 723
	if (packet->is_data_pkt && packet->xmit_more && remain &&
	    !packet->cp_partial) {
724
		padding = net_device->pkt_align - remain;
725
		rndis_msg->msg_len += padding;
726 727
		packet->total_data_buflen += padding;
	}
728

729
	for (i = 0; i < page_count; i++) {
730 731 732 733 734 735 736 737
		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
		u32 offset = packet->page_buf[i].offset;
		u32 len = packet->page_buf[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}
738 739 740 741 742 743

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

744 745 746
	return msg_size;
}

747 748 749
static inline int netvsc_send_pkt(
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device)
750
{
751
	struct nvsp_message nvmsg;
752
	struct vmbus_channel *out_channel = get_channel(packet, net_device);
K
KY Srinivasan 已提交
753
	u16 q_idx = packet->q_idx;
754 755 756
	struct net_device *ndev = net_device->ndev;
	u64 req_id;
	int ret;
757
	struct hv_page_buffer *pgbuf;
758
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
759

760
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
761
	if (packet->is_data_pkt) {
762
		/* 0 is RMC_DATA; */
763
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
764 765
	} else {
		/* 1 is RMC_CONTROL; */
766
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
767
	}
768

769 770 771 772 773 774 775
	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;
776

777
	if (packet->completion_func)
778
		req_id = (ulong)packet;
779 780 781
	else
		req_id = 0;

782 783 784
	if (out_channel->rescind)
		return -ENODEV;

785 786 787 788 789 790 791 792 793 794
	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		packet->xmit_more = false;

795
	if (packet->page_buf_cnt) {
796 797
		pgbuf = packet->cp_partial ? packet->page_buf +
			packet->rmsg_pgcnt : packet->page_buf;
798 799 800 801 802 803 804 805
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !packet->xmit_more);
806
	} else {
807 808 809 810 811 812
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !packet->xmit_more);
813 814
	}

815 816
	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
K
KY Srinivasan 已提交
817
		atomic_inc(&net_device->queue_sends[q_idx]);
818

819 820
		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
821

822
			if (atomic_read(&net_device->
K
KY Srinivasan 已提交
823
				queue_sends[q_idx]) < 1)
824
				netif_tx_wake_queue(netdev_get_tx_queue(
K
KY Srinivasan 已提交
825
						    ndev, q_idx));
826
		}
827
	} else if (ret == -EAGAIN) {
828
		netif_tx_stop_queue(netdev_get_tx_queue(
K
KY Srinivasan 已提交
829 830
				    ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
831
			netif_tx_wake_queue(netdev_get_tx_queue(
K
KY Srinivasan 已提交
832
					    ndev, q_idx));
833 834
			ret = -ENOSPC;
		}
835
	} else {
836
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
837
			   packet, ret);
838
	}
839

840 841 842 843
	return ret;
}

int netvsc_send(struct hv_device *device,
844 845
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg)
846 847 848 849 850 851 852 853 854 855
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	unsigned long flag;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
856
	bool try_batch;
857 858 859 860 861

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

862 863 864
	out_channel = get_channel(packet, net_device);
	q_idx = packet->q_idx;

865
	packet->send_buf_index = NETVSC_INVALID_INDEX;
866
	packet->cp_partial = false;
867 868 869 870 871 872 873 874

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	spin_lock_irqsave(&msdp->lock, flag);
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

875 876 877 878
	try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
879 880 881
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

882 883 884 885 886
	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

887 888 889 890 891 892 893 894 895 896 897 898 899 900
	} else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
				msd_send = msdp->pkt;
				msdp->pkt = NULL;
				msdp->count = 0;
				msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
901
					packet, rndis_msg);
902

903
		packet->send_buf_index = section_index;
904 905 906 907 908 909 910 911

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}
912

913 914 915
		if (msdp->pkt)
			netvsc_xmit_completion(msdp->pkt);

916
		if (packet->xmit_more && !packet->cp_partial) {
917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		msd_send = msdp->pkt;
		msdp->pkt = NULL;
		msdp->count = 0;
		cur_send = packet;
	}

	spin_unlock_irqrestore(&msdp->lock, flag);

	if (msd_send) {
		m_ret = netvsc_send_pkt(msd_send, net_device);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
939
			netvsc_xmit_completion(msd_send);
940 941 942 943 944 945
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(cur_send, net_device);

946 947
	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);
948

949 950 951
	return ret;
}

952
static void netvsc_send_recv_completion(struct hv_device *device,
953
					struct vmbus_channel *channel,
954
					struct netvsc_device *net_device,
955
					u64 transaction_id, u32 status)
956 957 958 959
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
960 961 962
	struct net_device *ndev;

	ndev = net_device->ndev;
963 964 965 966

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

967
	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
968 969 970

retry_send_cmplt:
	/* Send the completion */
971
	ret = vmbus_sendpacket(channel, &recvcompMessage,
972 973 974 975 976
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
977
	} else if (ret == -EAGAIN) {
978 979
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
980
		netdev_err(ndev, "unable to send receive completion pkt"
981
			" (tid %llx)...retrying %d\n", transaction_id, retries);
982 983 984 985 986

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
987
			netdev_err(ndev, "unable to send receive "
988
				"completion pkt (tid %llx)...give up retrying\n",
989 990 991
				transaction_id);
		}
	} else {
992
		netdev_err(ndev, "unable to send receive "
993
			"completion pkt - %llx\n", transaction_id);
994 995 996
	}
}

997
static void netvsc_receive(struct netvsc_device *net_device,
998
			struct vmbus_channel *channel,
999 1000
			struct hv_device *device,
			struct vmpacket_descriptor *packet)
1001
{
1002 1003
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
1004 1005 1006
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
1007 1008
	int i;
	int count = 0;
1009
	struct net_device *ndev;
1010
	void *data;
1011

1012
	ndev = net_device->ndev;
1013

1014 1015 1016 1017
	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
1018
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
1019
		netdev_err(ndev, "Unknown packet type received - %d\n",
1020
			   packet->type);
1021 1022 1023
		return;
	}

1024
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
1025
			(packet->offset8 << 3));
1026

1027
	/* Make sure this is a valid nvsp packet */
1028 1029
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
1030
		netdev_err(ndev, "Unknown nvsp packet type received-"
1031
			" %d\n", nvsp_packet->hdr.msg_type);
1032 1033 1034
		return;
	}

1035
	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
1036

1037
	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
1038
		netdev_err(ndev, "Invalid xfer page set id - "
1039
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
1040
			   vmxferpage_packet->xfer_pageset_id);
1041 1042 1043
		return;
	}

1044
	count = vmxferpage_packet->range_cnt;
1045

1046
	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1047
	for (i = 0; i < count; i++) {
1048
		/* Initialize the netvsc packet */
1049
		netvsc_packet->status = NVSP_STAT_SUCCESS;
1050
		data = (void *)((unsigned long)net_device->
1051
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
1052
		netvsc_packet->total_data_buflen =
1053
					vmxferpage_packet->ranges[i].byte_count;
1054

1055
		/* Pass it to the upper layer */
1056
		rndis_filter_receive(device, netvsc_packet, &data, channel);
1057

1058 1059
		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;
1060 1061
	}

1062 1063
	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
1064 1065
}

1066 1067

static void netvsc_send_table(struct hv_device *hdev,
1068
			      struct nvsp_message *nvmsg)
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

/* Record the VF association state announced by the host. */
static void netvsc_send_vf(struct netvsc_device *nvdev,
			   struct nvsp_message *nvmsg)
{
	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
}

static inline void netvsc_receive_inband(struct hv_device *hdev,
					 struct netvsc_device *nvdev,
					 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(nvdev, nvmsg);
		break;
	}
}

1115
void netvsc_channel_cb(void *context)
1116
{
1117
	int ret;
1118 1119
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
1120 1121 1122
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
1123
	struct vmpacket_descriptor *desc;
1124 1125
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
1126
	struct net_device *ndev;
1127
	struct nvsp_message *nvmsg;
1128

1129 1130 1131 1132 1133
	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

1134
	net_device = get_inbound_net_device(device);
1135
	if (!net_device)
1136
		return;
1137
	ndev = net_device->ndev;
1138
	buffer = get_per_channel_state(channel);
1139

1140
	do {
1141
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
1142
					   &bytes_recvd, &request_id);
1143
		if (ret == 0) {
1144
			if (bytes_recvd > 0) {
1145
				desc = (struct vmpacket_descriptor *)buffer;
1146 1147
				nvmsg = (struct nvsp_message *)((unsigned long)
					 desc + (desc->offset8 << 3));
1148 1149
				switch (desc->type) {
				case VM_PKT_COMP:
1150
					netvsc_send_completion(net_device,
1151
								channel,
1152
								device, desc);
1153 1154
					break;

1155
				case VM_PKT_DATA_USING_XFER_PAGES:
1156 1157 1158 1159 1160
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
1161 1162 1163
					netvsc_receive_inband(device,
							      net_device,
							      nvmsg);
1164 1165 1166
					break;

				default:
1167
					netdev_err(ndev,
1168 1169
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
1170
						   desc->type, request_id,
1171
						   bytes_recvd);
1172
					break;
1173 1174
				}

1175
			} else {
1176 1177 1178
				/*
				 * We are done for this pass.
				 */
1179 1180
				break;
			}
1181

1182
		} else if (ret == -ENOBUFS) {
1183 1184
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
1185
			/* Handle large packet */
1186
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
1187
			if (buffer == NULL) {
1188
				/* Try again next time around */
1189
				netdev_err(ndev,
1190
					   "unable to allocate buffer of size "
1191
					   "(%d)!!\n", bytes_recvd);
1192 1193 1194
				break;
			}

1195
			bufferlen = bytes_recvd;
1196 1197 1198
		}
	} while (1);

1199 1200
	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
1201 1202
	return;
}
1203

1204 1205 1206 1207
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
1208
int netvsc_device_add(struct hv_device *device, void *additional_info)
1209 1210
{
	int ret = 0;
1211 1212
	int ring_size =
	((struct netvsc_device_info *)additional_info)->ring_size;
1213
	struct netvsc_device *net_device;
1214
	struct net_device *ndev;
1215 1216

	net_device = alloc_net_device(device);
1217 1218
	if (!net_device)
		return -ENOMEM;
1219

1220 1221
	net_device->ring_size = ring_size;

1222 1223 1224 1225 1226 1227 1228 1229 1230
	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

1231 1232 1233
	/* Add netvsc_device context to netvsc_device */
	net_device->nd_ctx = netdev_priv(ndev);

1234
	/* Initialize the NetVSC channel extension */
1235
	init_completion(&net_device->channel_init_wait);
1236

1237 1238
	set_per_channel_state(device->channel, net_device->cb_buffer);

1239
	/* Open the channel */
1240 1241
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
1242
			 netvsc_channel_cb, device->channel);
1243 1244

	if (ret != 0) {
1245
		netdev_err(ndev, "unable to open channel: %d\n", ret);
1246 1247 1248 1249
		goto cleanup;
	}

	/* Channel is opened */
1250
	pr_info("hv_netvsc channel opened successfully\n");
1251

1252 1253
	net_device->chn_table[0] = device->channel;

1254 1255 1256
	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
1257
		netdev_err(ndev,
1258
			"unable to connect to NetVSP - %d\n", ret);
1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
1269
	free_netvsc_device(net_device);
1270 1271 1272

	return ret;
}