ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

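/*
 * hv_begin_read()
 *
 * Mask interrupts from the host while this end drains the ring buffer.
 */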
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

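/*
 * hv_end_read()
 *
 * Unmask host interrupts and return the number of bytes still available
 * to read, so the caller can process any data that raced in.
 */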
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
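	/*
	 * Make the unmasking visible before re-checking whether data
	 * arrived while interrupts were masked.
	 */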
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
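	/*
	 * Make sure the updated write index is visible before we sample
	 * the host's interrupt_mask and read_index below.
	 */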
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}

/*
 * To optimize flow management on the send side: when the sender is
 * blocked because of insufficient space in the ring buffer, the
 * consumer of the ring buffer can potentially signal the producer
 * once room becomes available.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 old_rd,
					 struct hv_ring_buffer_info *rbi)
{
	u32 prev_write_sz;
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/*
	 * If the other end is not blocked on write don't bother.
	 */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
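	/*
	 * Compute the free space the producer would have seen before this
	 * read (based on the old read index) and the free space it sees
	 * now; signal only if this read pushed the free space across the
	 * pending_send_sz threshold.
	 */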
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
			old_rd - write_loc;


	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed,
 * such as a packet descriptor.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the write index of the specified ring buffer, packed into the
 * upper 32 bits of a u64
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer to the destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source only.
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);


	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination only.
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
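	/* The ring buffer header is expected to occupy exactly one page. */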
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/*
	 * Set the feature bit for enabling flow control.
	 */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

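	/* Reserve room for the trailing u64 (prev_indices) written below. */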
	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);


	/*
	 * If there is only room for the packet, treat the ring buffer as
	 * full; otherwise, the next time around we would think the ring
	 * buffer is empty, since the read index would equal the write
	 * index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

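	/* Remember where this write begins; used to decide whether to signal. */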
	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);


	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
		   void *Buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&Inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(Inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {

		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(Inring_info);

	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
						Buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

	return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset, bool *signal)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;
	u32 old_read;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

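	/* Remember how many bytes were readable before this read. */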
	old_read = bytes_avail_toread;

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

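	/* Consume the trailing u64 index marker appended by the writer. */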
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	*signal = hv_need_to_signal_on_read(old_read, inring_info);

	return 0;
}