/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt,
			       VM_PKT_DATA_INBAND, 0);
}

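/* Allocate a netvsc_device along with the receive-completion ring for
 * the primary channel, and initialize the defaults used by the send path.
 */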
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->chan_table[0].mrc.buf
		= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
	init_completion(&net_device->channel_init_wait);

	return net_device;
}

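/* RCU callback: runs after the last reader of the netvsc_device is done;
 * frees the per-channel receive-completion buffers and the device itself.
 */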
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		vfree(nvdev->chan_table[i].mrc.buf);

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

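/* Return the netvsc_device for this hv_device, or NULL once destruction
 * of the device has started.
 */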
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

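/* Undo netvsc_init_buf(): revoke the receive and send buffers from the
 * host, tear down their GPADLs, and free the guest-side allocations.
 */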
static void netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke receive buffer to netvsp\n");
			return;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send revoke send buffer to netvsp\n");
			return;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);
}

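/* Allocate the receive and send buffers, establish GPADL handles for
 * them on the channel, and post them to the host (NetVSP).
 */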
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive buffer of size %d\n",
			   net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %d\n",
			   net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
				v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map = kcalloc(net_device->map_words,
					       sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(device);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);

	return ret;
}

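/* Negotiate the highest NVSP protocol version both ends support, send
 * the NDIS version, then size and post the receive/send buffers.
 */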
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;
	int i;

	netvsc_disconnect_vsp(device);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	for (i = 0; i < net_device->num_chn; i++)
		napi_disable(&net_device->chan_table[i].napi);

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

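/* Return a send-buffer section to the free pool by toggling its (set)
 * bit in the section map.
 */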
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

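/* Completion handler for an RNDIS data packet: release its send-buffer
 * section, update per-queue stats, free the skb, and wake the TX queue
 * if ring space is available again.
 */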
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
				    struct vmbus_channel *incoming_channel,
				    struct hv_device *device,
				    const struct vmpacket_descriptor *desc)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device *ndev = hv_get_drvdata(device);
	struct vmbus_channel *channel = device->channel;
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;
		channel = incoming_channel;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		dev_consume_skb_any(skb);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (net_device->destroy && queue_sends == 0)
		wake_up(&net_device->wait_drain);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
	     queue_sends < 1))
		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
}

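/* Dispatch a VM_PKT_COMP packet from the host: channel-init responses
 * wake the init waiter, RNDIS completions go to netvsc_send_tx_complete().
 */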
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   const struct vmpacket_descriptor *desc)
{
	struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	struct net_device *ndev = hv_get_drvdata(device);

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
	case NVSP_MSG5_TYPE_SUBCHANNEL:
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(net_device, incoming_channel,
					device, desc);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

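/* Claim a free send-buffer section, or return NETVSC_INVALID_INDEX when
 * every section is in use.
 */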
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->map_words) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

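/* Copy the packet's page fragments into the chosen send-buffer section,
 * zero-padding the RNDIS message to the packet alignment when batching.
 */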
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (skb->xmit_more && remain && !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}

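/* Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message for one packet and
 * put it on the VMBus channel, stopping the TX queue when the outbound
 * ring runs low.
 */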
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct netvsc_channel *nvchan
		= &net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
			netif_tx_stop_queue(txq);
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		if (atomic_read(&nvchan->queue_sends) < 1) {
			netif_tx_wake_queue(txq);
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

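/* Transmit entry point: batch small packets into a shared send-buffer
 * section while skb->xmit_more indicates more are coming; otherwise,
 * or when the section fills, flush via netvsc_send_pkt().
 */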
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
	 * here before the negotiation with the host is finished and
	 * send_section_map may not be allocated yet.
	 */
	if (!net_device->send_section_map)
		return -EAGAIN;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

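/* Report the receive status of one RNDIS packet back to the host. */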
static int netvsc_send_recv_completion(struct vmbus_channel *channel,
				       u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int ret;

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message_header) + sizeof(u32),
			       transaction_id, VM_PKT_COMP, 0);

	return ret;
}

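/* Compute the filled and available slot counts of a queue's circular
 * receive-completion buffer.
 */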
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
					u32 *filled, u32 *avail)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 first = mrc->first;
	u32 next = mrc->next;

	*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
		  next - first;

	*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}

/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
							 *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail;

	if (unlikely(!mrc->buf))
		return NULL;

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!filled)
		return NULL;

	return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}

/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	int num_recv;

	mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;

	num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);

	if (nvdev->destroy && num_recv == 0)
		wake_up(&nvdev->wait_drain);
}

/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
				 struct vmbus_channel *channel, u16 q_idx)
{
	struct recv_comp_data *rcd;
	int ret;

	while (true) {
		rcd = read_recv_comp_slot(nvdev, q_idx);
		if (!rcd)
			break;

		ret = netvsc_send_recv_completion(channel, rcd->tid,
						  rcd->status);
		if (ret)
			break;

		put_recv_comp_slot(nvdev, q_idx);
	}
}

#define NETVSC_RCD_WATERMARK 80

/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
	struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
	struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
	u32 filled, avail, next;
	struct recv_comp_data *rcd;

	if (unlikely(!nvdev->recv_section))
		return NULL;

	if (unlikely(!mrc->buf))
		return NULL;

	if (atomic_read(&nvdev->num_outstanding_recvs) >
	    nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
		netvsc_chk_recv_comp(nvdev, channel, q_idx);

	count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
	if (!avail)
		return NULL;

	next = mrc->next;
	rcd = mrc->buf + next * sizeof(struct recv_comp_data);
	mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;

	atomic_inc(&nvdev->num_outstanding_recvs);

	return rcd;
}

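/* Handle a transfer-page packet: pass each contained RNDIS packet to
 * the filter, then queue (or directly send) the receive completion.
 */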
static int netvsc_receive(struct net_device *ndev,
		   struct netvsc_device *net_device,
		   struct net_device_context *net_device_ctx,
		   struct hv_device *device,
		   struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc,
		   struct nvsp_message *nvsp)
{
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	int ret;

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		void *data = recv_buf
			+ vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(ndev, net_device, device,
					      channel, data, buflen);
	}

	if (net_device->chan_table[q_idx].mrc.buf) {
		struct recv_comp_data *rcd;

		rcd = get_recv_comp_slot(net_device, channel, q_idx);
		if (rcd) {
			rcd->tid = vmxferpage_packet->d.trans_id;
			rcd->status = status;
		} else {
			netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
				   q_idx, vmxferpage_packet->d.trans_id);
		}
	} else {
		ret = netvsc_send_recv_completion(channel,
						  vmxferpage_packet->d.trans_id,
						  status);
		if (ret)
			netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
				   q_idx, vmxferpage_packet->d.trans_id, ret);
	}
	return count;
}

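/* Copy the send indirection table supplied by the host; it is used to
 * map a flow hash to the transmit queue/channel.
 */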
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct net_device *ndev = hv_get_drvdata(hdev);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	int i;
	u32 count, *tab;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		net_device_ctx->tx_send_table[i] = tab[i];
}

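/* Record the VF association (allocation state and serial number)
 * announced by the host.
 */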
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
			   struct nvsp_message *nvmsg)
{
	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}

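/* Handle inband NVSP control messages: the send indirection table and
 * VF association updates.
 */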
static inline void netvsc_receive_inband(struct hv_device *hdev,
				 struct net_device_context *net_device_ctx,
				 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(net_device_ctx, nvmsg);
		break;
	}
}

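/* Demultiplex one VMBus packet descriptor by type: completions,
 * transfer pages (receive data), or inband control messages.
 */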
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct vmbus_channel *channel,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct nvsp_message *nvmsg = hv_pkt_data(desc);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(net_device, channel, device, desc);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, net_device_ctx,
				      device, channel, desc, nvmsg);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(device, net_device_ctx, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

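/* Map a channel back to its hv_device; subchannels resolve through
 * their primary channel.
 */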
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
	int work_done = 0;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, channel, net_device,
						    ndev, nvchan->desc);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* If receive ring was exhausted and not doing busy poll,
	 * then re-enable host interrupts and reschedule if the ring
	 * is not empty.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    hv_end_read(&channel->inbound) != 0)
		napi_reschedule(napi);

	netvsc_chk_recv_comp(net_device, channel, q_idx);

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}

/* Call back when data is available in host ring buffer.
 * Processing is deferred until network softirq (NAPI)
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
		hv_begin_read(&nvchan->channel->inbound);

		__napi_schedule(&nvchan->napi);
	}
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device,
		      const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	int ring_size = device_info->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb,
			 net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		netif_napi_add(ndev, &nvchan->napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);
	}

	/* Enable NAPI handler for init callbacks */
	napi_enable(&net_device->chan_table[0].napi);

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(&net_device->rcu);

	return ret;
}