/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

/*
 * hv_begin_read()
 *
 * Prepare to drain the inbound ring: set interrupt_mask so the host
 * does not signal us for each new packet while we are already reading.
 * The full barrier makes the mask store visible before any subsequent
 * reads of the ring state by this CPU.
 */
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	smp_mb();
}

/*
 * hv_end_read()
 *
 * Finish draining the inbound ring: clear interrupt_mask (re-enabling
 * host signaling) and then, after a full barrier orders the clear
 * before the re-check, sample the available-byte counts again.
 *
 * Returns the number of bytes still available to read; non-zero means
 * new data raced in after the mask was cleared and the caller must go
 * around the read loop again.
 */
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here is the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	/* The host is draining and has asked not to be interrupted. */
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/*
	 * Signal only on the empty -> non-empty transition: the write
	 * we just completed began exactly at the host's read index.
	 */
	return old_write == rbi->ring_buffer->read_index;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potential the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
					 struct hv_ring_buffer_info *rbi)
{
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
	u32 ring_sz;
	u32 free_now;
	u32 free_before;

	/* If the other end is not blocked on write, don't bother. */
	if (pending_sz == 0)
		return false;

	/* Free space in the ring now vs. before this read advanced. */
	ring_sz = rbi->ring_datasize;
	free_now = (write_loc >= read_loc) ?
			ring_sz - (write_loc - read_loc) :
			read_loc - write_loc;
	free_before = (write_loc >= old_rd) ?
			ring_sz - (write_loc - old_rd) :
			old_rd - write_loc;

	/* Signal only when this read created room for the pending send. */
	return (free_before < pending_sz) && (free_now >= pending_sz);
}
131

132 133 134 135 136 137
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
138
static inline u32
139
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
140
{
141
	u32 next = ring_info->ring_buffer->write_index;
142 143 144 145

	return next;
}

146 147 148 149 150 151
/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
152
static inline void
153
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
154
		     u32 next_write_location)
155
{
156
	ring_info->ring_buffer->write_index = next_write_location;
157 158
}

159 160 161 162 163
/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
164
static inline u32
165
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
166
{
167
	u32 next = ring_info->ring_buffer->read_index;
168 169 170 171

	return next;
}

172 173 174 175 176 177
/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip
 */
178
static inline u32
179
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
180
				 u32 offset)
181
{
182
	u32 next = ring_info->ring_buffer->read_index;
183

184 185
	next += offset;
	next %= ring_info->ring_datasize;
186 187 188 189

	return next;
}

190 191 192 193 194 195 196
/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
197
static inline void
198
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
199
		    u32 next_read_location)
200
{
201
	ring_info->ring_buffer->read_index = next_read_location;
202 203 204
}


205 206 207 208 209 210
/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
211
static inline void *
212
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
213
{
214
	return (void *)ring_info->ring_buffer->buffer;
215 216 217
}


218 219 220 221 222 223
/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
224
static inline u32
225
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
226
{
227
	return ring_info->ring_datasize;
228 229
}

230 231 232 233 234 235 236
/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 *
 */
237
static inline u64
238
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
239
{
240
	return (u64)ring_info->ring_buffer->write_index << 32;
241 242
}

243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279
/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy to source from ring buffer.
 * Assume there is enough room. Handles wrap-around in src case only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else

		memcpy(dest, ring_buffer + start_read_offset, destlen);


	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


280 281 282 283 284 285 286 287 288
/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 *
 */
static u32 hv_copyto_ringbuffer(
289 290 291
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
292 293 294 295 296 297 298 299 300 301 302 303 304
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);
305

306 307 308 309 310
	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
311

312 313 314 315 316 317 318
/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
319
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
320
			    struct hv_ring_buffer_debug_info *debug_info)
321
{
322 323
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
324

325
	if (ring_info->ring_buffer) {
326
		hv_get_ringbuffer_availbytes(ring_info,
327 328
					&bytes_avail_toread,
					&bytes_avail_towrite);
329

330 331
		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
332
		debug_info->current_read_index =
333
			ring_info->ring_buffer->read_index;
334
		debug_info->current_write_index =
335
			ring_info->ring_buffer->write_index;
336
		debug_info->current_interrupt_mask =
337
			ring_info->ring_buffer->interrupt_mask;
338 339 340
	}
}

341 342 343 344 345 346 347
/*
 *
 * hv_ringbuffer_init()
 *
 *Initialize the ring buffer
 *
 */
348
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
349
		   void *buffer, u32 buflen)
350
{
351
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
352
		return -EINVAL;
353

354
	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
355

356 357 358
	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;
359

360 361
	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
362

363
	spin_lock_init(&ring_info->ring_lock);
364 365 366 367

	return 0;
}

/*
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer.  hv_ringbuffer_init() allocates nothing
 * itself (the buffer memory is supplied by the caller), so there is
 * currently nothing to release here.
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

379 380 381 382 383 384 385
/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
386
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
387
		    struct scatterlist *sglist, u32 sgcount, bool *signal)
388
{
389
	int i = 0;
390 391 392
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;
393

394
	struct scatterlist *sg;
395
	u32 next_write_location;
396
	u32 old_write;
397
	u64 prev_indices = 0;
398
	unsigned long flags;
399

400
	for_each_sg(sglist, sg, sgcount, i)
401
	{
402
		totalbytes_towrite += sg->length;
403 404
	}

405
	totalbytes_towrite += sizeof(u64);
406

407
	spin_lock_irqsave(&outring_info->ring_lock, flags);
408

409
	hv_get_ringbuffer_availbytes(outring_info,
410 411
				&bytes_avail_toread,
				&bytes_avail_towrite);
412 413


414 415
	/* If there is only room for the packet, assume it is full. */
	/* Otherwise, the next time around, we think the ring buffer */
416
	/* is empty since the read index == write index */
417 418
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
419
		return -EAGAIN;
420 421
	}

422
	/* Write to the ring buffer */
423
	next_write_location = hv_get_next_write_location(outring_info);
424

425 426
	old_write = next_write_location;

427
	for_each_sg(sglist, sg, sgcount, i)
428
	{
429
		next_write_location = hv_copyto_ringbuffer(outring_info,
430
						     next_write_location,
431 432
						     sg_virt(sg),
						     sg->length);
433 434
	}

435
	/* Set previous packet start */
436
	prev_indices = hv_get_ring_bufferindices(outring_info);
437

438
	next_write_location = hv_copyto_ringbuffer(outring_info,
439 440
					     next_write_location,
					     &prev_indices,
441
					     sizeof(u64));
442

443 444
	/* Issue a full memory barrier before updating the write index */
	smp_mb();
445

446
	/* Now, update the write location */
447
	hv_set_next_write_location(outring_info, next_write_location);
448 449


450
	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
451 452

	*signal = hv_need_to_signal(old_write, outring_info);
453 454 455 456
	return 0;
}


457 458 459 460 461 462 463
/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
464
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
465
		   void *Buffer, u32 buflen)
466
{
467 468 469
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
470
	unsigned long flags;
471

472
	spin_lock_irqsave(&Inring_info->ring_lock, flags);
473

474
	hv_get_ringbuffer_availbytes(Inring_info,
475 476
				&bytes_avail_toread,
				&bytes_avail_towrite);
477

478
	/* Make sure there is something to read */
479
	if (bytes_avail_toread < buflen) {
480

481
		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
482

483
		return -EAGAIN;
484 485
	}

486
	/* Convert to byte offset */
487
	next_read_location = hv_get_next_read_location(Inring_info);
488

489
	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
490
						Buffer,
491 492
						buflen,
						next_read_location);
493

494
	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
495 496 497 498 499

	return 0;
}


500 501 502 503 504 505 506
/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
507
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
508
		   u32 buflen, u32 offset, bool *signal)
509
{
510 511 512 513
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
514
	unsigned long flags;
515
	u32 old_read;
516

517
	if (buflen <= 0)
518
		return -EINVAL;
519

520
	spin_lock_irqsave(&inring_info->ring_lock, flags);
521

522
	hv_get_ringbuffer_availbytes(inring_info,
523 524
				&bytes_avail_toread,
				&bytes_avail_towrite);
525

526 527
	old_read = bytes_avail_toread;

528
	/* Make sure there is something to read */
529 530
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
531

532
		return -EAGAIN;
533 534
	}

535
	next_read_location =
536
		hv_get_next_readlocation_withoffset(inring_info, offset);
537

538
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
539 540 541
						buffer,
						buflen,
						next_read_location);
542

543
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
544
						&prev_indices,
545
						sizeof(u64),
546
						next_read_location);
547

548
	/* Make sure all reads are done before we update the read index since */
549 550
	/* the writer may start writing to the read area once the read index */
	/*is updated */
551
	smp_mb();
552

553
	/* Update the read index */
554
	hv_set_next_read_location(inring_info, next_read_location);
555

556
	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
557

558 559
	*signal = hv_need_to_signal_on_read(old_read, inring_info);

560 561
	return 0;
}