/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
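
/*
 * hv_begin_read()
 *
 * Tell the host not to interrupt us while we drain the receive ring:
 * set the interrupt mask and make sure the update is visible before
 * any further reads of the ring state.
 */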
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	smp_mb();
}

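/*
 * hv_end_read()
 *
 * Re-enable host interrupts after draining the ring, then report how
 * many bytes are still available to read; a non-zero return means we
 * raced with the host and the caller must process the new data.
 */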
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
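
/*
 * Given these guarantees, the only write that must signal the host is
 * the one that takes the ring from empty to non-empty (i.e. the old
 * write index equals the current read index); data queued while the
 * host is still draining will be picked up before it exits the loop.
 */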

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	smp_mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize flow management on the send side: when the sender is
 * blocked because of insufficient space in the ring buffer, the
 * consumer of the ring buffer can potentially signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate whether
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */
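
/*
 * Worked example (illustrative numbers): with ring_datasize = 4096,
 * read_index = 100 and write_index = 200, the free space computed
 * below is 4096 - (200 - 100) = 3996 bytes. A signal is raised only
 * when a read moves the free space from below pending_send_sz to at
 * or above it.
 */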

static bool hv_need_to_signal_on_read(u32 old_rd,
					 struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;


	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by 'offset' bytes from the
 * current read index.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed into a u64 (the write index is
 * placed in the upper 32 bits); used to stamp each written packet.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data out of the ring buffer into dest.
 * Assumes there is enough room. Handles wrap-around on the source
 * (ring) side only!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy src into the ring buffer.
 * Assumes there is enough room. Handles wrap-around on the destination
 * (ring) side only!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
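 * The ring buffer control structure (struct hv_ring_buffer) is expected
 * to occupy exactly one page; buflen covers both that header page and
 * the data area that follows it.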
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
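 * The payload described by sglist is copied into the ring, followed by
 * a u64 stamp of the ring indices, all under the ring lock; *signal is
 * set when the host needs to be notified (see hv_need_to_signal()).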
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
	{
		totalbytes_towrite += sg->length;
	}

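	/*
	 * Reserve room for the u64 written after the payload to record
	 * the ring indices at the start of this packet ("previous packet
	 * start" below).
	 */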
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i)
	{
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	smp_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
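 * The requested bytes are copied out under the ring lock, but the read
 * index is left unchanged, so the same data remains available to a
 * subsequent read.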
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
		   void *Buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&Inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(Inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(Inring_info);

	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
						Buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

	return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
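 * Copies buflen bytes starting 'offset' bytes past the current read
 * index, consumes the trailing u64 index stamp, and then publishes the
 * new read index; *signal is set when a blocked writer should be
 * signaled (see hv_need_to_signal_on_read()).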
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

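	/*
	 * Also consume the u64 index stamp that the writer appended after
	 * the payload (see hv_ringbuffer_write()), advancing past it too.
	 */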
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}