channel.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
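/*
 * For example (assuming 4K pages): a 0x20-byte range starting at address
 * 0x1ff0 gives PAGE_ALIGN(0x1ff0 + 0x20) >> PAGE_SHIFT = 3 and
 * 0x1ff0 >> PAGE_SHIFT = 1, so NUM_PAGES_SPANNED() reports 2 pages spanned.
 */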

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		monitorpage = vmbus_connection.monitor_pages;
		monitorpage++; /* Get the child to parent monitor page */

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);

	} else {
		vmbus_set_event(channel);
	}
}

/*
 * vmbus_get_debug_info - Retrieve various channel debug info
 */
void vmbus_get_debug_info(struct vmbus_channel *channel,
			      struct vmbus_channel_debug_info *debuginfo)
{
	struct hv_monitor_page *monitorpage;
	u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
	u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;

	monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;

	debuginfo->servermonitor_pending =
			monitorpage->trigger_group[monitor_group].pending;
	debuginfo->servermonitor_latency =
			monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->servermonitor_connectionid =
			monitorpage->parameter[monitor_group]
					[monitor_offset].connectionid.u.id;

	monitorpage++;

	debuginfo->clientmonitor_pending =
			monitorpage->trigger_group[monitor_group].pending;
	debuginfo->clientmonitor_latency =
			monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->clientmonitor_connectionid =
			monitorpage->parameter[monitor_group]
					[monitor_offset].connectionid.u.id;

	hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
	hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
		     u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
		     void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	void *in, *out;
	unsigned long flags;
	int ret, t, err = 0;

	spin_lock_irqsave(&newchannel->sc_lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->sc_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->sc_lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));

	if (!out)
		return -ENOMEM;

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					   recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(
		&newchannel->outbound, out, send_ringbuffer_size);

	if (ret != 0) {
		err = ret;
		goto error0;
	}

	ret = hv_ringbuffer_init(
		&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
					 newchannel->outbound.ring_buffer,
					 send_ringbuffer_size +
					 recv_ringbuffer_size,
					 &newchannel->ringbuffer_gpadlhandle);

	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			   sizeof(struct vmbus_channel_open_channel),
			   GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error0;
	}

	init_completion(&open_info->waitevent);

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						  PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error0;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			       sizeof(struct vmbus_channel_open_channel));

	if (ret != 0)
		goto error1;

	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
	if (t == 0) {
		err = -ETIMEDOUT;
		goto error1;
	}

	if (open_info->response.open_result.status)
		err = open_info->response.open_result.status;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (err == 0)
		newchannel->state = CHANNEL_OPENED_STATE;

	kfree(open_info);
	return err;

error1:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error0:
	free_pages((unsigned long)out,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(open_info);
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
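
/*
 * Illustrative sketch (not part of the original file): a typical call to
 * vmbus_open() as a VMBus driver might issue it from its probe path.  The
 * ring-buffer sizes and the names "my_onchannelcallback" and "my_dev" are
 * hypothetical; they only demonstrate the parameter order defined above.
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannelcallback, my_dev);
 *	if (ret)
 *		return ret;
 */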

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
					 struct vmbus_channel_msginfo **msginfo,
					 u32 *messagecount)
{
	int i;
	int pagecount;
	unsigned long long pfn;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;
	pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = pfn + pfnsum + i;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: the channel on which the GPADL is established
 * @kbuffer: a kernel buffer, typically from kmalloc
 * @size: size of the buffer, a multiple of PAGE_SIZE
 * @gpadl_handle: on success, receives the handle of the newly created GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			       u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;
	int t;

	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
	atomic_inc(&vmbus_connection.next_gpadl_handle);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);

	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			       sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			ret = vmbus_post_msg(gpadl_body,
					       submsginfo->msgsize -
					       sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;

		}
	}
	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
	BUG_ON(t == 0);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
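
/*
 * Illustrative sketch (not part of the original file): establishing a GPADL
 * over a page-aligned buffer whose size is a multiple of PAGE_SIZE, and
 * tearing it down again.  "recv_buf" and "buf_size" are hypothetical names.
 *
 *	u32 gpadl = 0;
 *
 *	ret = vmbus_establish_gpadl(channel, recv_buf, buf_size, &gpadl);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */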

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret, t;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			       sizeof(struct vmbus_channel_gpadl_teardown));

	BUG_ON(ret != 0);
	t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
	BUG_ON(t == 0);

	/* Received a teardown response */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;
	unsigned long flags;

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop callback and cancel the timer asap */
	spin_lock_irqsave(&channel->inbound_lock, flags);
	channel->onchannel_callback = NULL;
	spin_unlock_irqrestore(&channel->inbound_lock, flags);

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

	BUG_ON(ret != 0);
	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle)
		vmbus_teardown_gpadl(channel,
					  channel->ringbuffer_gpadlhandle);

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to be sent.
 * @bufferlen: Length of the data in @buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 * @flags: Packet flags, e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED.
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
			   u32 bufferlen, u64 requestid,
			   enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	int ret;
	bool signal = false;

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket);
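
/*
 * Illustrative sketch (not part of the original file): sending a small
 * in-band request and asking the host for a completion packet.  "my_req" is
 * a hypothetical request structure; its address doubles as the transaction
 * id so the completion can be matched up later.
 *
 *	ret = vmbus_sendpacket(channel, &my_req, sizeof(my_req),
 *			       (unsigned long)&my_req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */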

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				     struct hv_page_buffer pagebuffers[],
				     u32 pagecount, void *buffer, u32 bufferlen,
				     u64 requestid)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
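
/*
 * Illustrative sketch (not part of the original file): describing two
 * single-page ranges with hv_page_buffer entries and sending them together
 * with a small in-band header.  "pb", "hdr", "page0"/"page1" and "req_id"
 * are hypothetical names.
 *
 *	struct hv_page_buffer pb[2];
 *
 *	pb[0].pfn = page_to_pfn(page0);
 *	pb[0].offset = 0;
 *	pb[0].len = PAGE_SIZE;
 *	pb[1].pfn = page_to_pfn(page1);
 *	pb[1].offset = 0;
 *	pb[1].len = PAGE_SIZE;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, pb, 2,
 *					  &hdr, sizeof(hdr), req_id);
 */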

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				struct hv_multipage_buffer *multi_pagebuffer,
				void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
			  ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
			  sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the Hyper-V VMBus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
			u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;
	bool signal = false;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
			     sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);

	*buffer_actual_len = userlen;

	if (userlen > bufferlen) {
		pr_err("Buffer too small - got %d needs %d\n",
			   bufferlen, userlen);
		return -ETOOSMALL;
	}

	*requestid = desc.trans_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
			     (desc.offset8 << 3), &signal);

	if (signal)
		vmbus_setevent(channel);

	return 0;
}
EXPORT_SYMBOL(vmbus_recvpacket);
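
/*
 * Illustrative sketch (not part of the original file): draining pending
 * packets from a channel's onchannel_callback.  "my_buf", "recvlen",
 * "requestid" and "process_packet" are hypothetical; a return of -ETOOSMALL
 * means the pending packet is larger than the supplied buffer.
 *
 *	do {
 *		ret = vmbus_recvpacket(channel, my_buf, sizeof(my_buf),
 *				       &recvlen, &requestid);
 *		if (ret == 0 && recvlen > 0)
 *			process_packet(my_buf, recvlen, requestid);
 *	} while (ret == 0 && recvlen > 0);
 */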

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			      u32 bufferlen, u32 *buffer_actual_len,
			      u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	int ret;
	bool signal = false;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
			     sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;

	*buffer_actual_len = packetlen;

	if (packetlen > bufferlen) {
		pr_err("Buffer too small - needed %d bytes but "
			"got space for only %d bytes\n",
			packetlen, bufferlen);
		return -ENOBUFS;
	}

	*requestid = desc.trans_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
				 &signal);

	if (signal)
		vmbus_setevent(channel);

	return 0;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);