/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
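
/*
 * Batched-read sketch (illustrative only; process_pending_packets() is a
 * placeholder, not a function in this driver): a consumer masks host
 * interrupts for the duration of the drain and re-checks for data that
 * raced in before the mask was cleared.
 *
 *	hv_begin_read(rbi);
 * again:
 *	process_pending_packets();
 *	if (hv_end_read(rbi) != 0) {
 *		hv_begin_read(rbi);
 *		goto again;
 *	}
 */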

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	mb();
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/* check interrupt_mask before read_index */
	rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
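
/*
 * Worked example of the transition hv_need_to_signal() looks for
 * (illustrative numbers, not from the driver): suppose the host had
 * drained the ring completely, so read_index == 10, and the guest's
 * write_index was also 10 before this write (old_write == 10).  The
 * write that just completed took the ring from empty to non-empty, so
 * the host must be signaled.  If old_write != read_index, the host is
 * still draining earlier data and will see the new packet on its own.
 */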

/*
 * To optimize flow management on the send side: when the sender is
 * blocked because of insufficient space in the ring buffer, the
 * consumer of the ring buffer can signal the producer once room
 * becomes available. This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate whether
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static bool hv_need_to_signal_on_read(u32 prev_write_sz,
				      struct hv_ring_buffer_info *rbi)
{
	u32 cur_write_sz;
	u32 r_size;
	u32 write_loc = rbi->ring_buffer->write_index;
	u32 read_loc = rbi->ring_buffer->read_index;
	u32 pending_sz = rbi->ring_buffer->pending_send_sz;

	/* If the other end is not blocked on write, don't bother. */
	if (pending_sz == 0)
		return false;

	r_size = rbi->ring_datasize;
	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
			read_loc - write_loc;

	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
		return true;

	return false;
}
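
/*
 * Worked example (illustrative numbers, not from the driver): with
 * ring_datasize = 4096, read_index = 1024, write_index = 3072 and
 * pending_send_sz = 2048, the current free space is
 * 4096 - (3072 - 1024) = 2048 bytes.  If the free space before this
 * read (prev_write_sz) was below 2048, the ring has just gained enough
 * room for the pending packet, so the reader signals the blocked
 * producer.
 */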

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead (e.g. past a packet descriptor).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the read and write indices as a u64 of the specified ring buffer.
 * Note that only the write index is recorded, in the upper 32 bits.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data out of the ring buffer into a destination
 * buffer. Assumes there is enough data available. Handles wrap-around
 * on the source (ring buffer) side only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


/*
 * Helper routine to copy data from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around on the destination
 * (ring buffer) side only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
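
/*
 * Wrap-around example (illustrative numbers, not from the driver): with
 * ring_buffer_size = 4096 and start_write_offset = 4000, copying a
 * 200-byte packet places bytes 0..95 at offsets 4000..4095 and bytes
 * 96..199 at offsets 0..103, then returns the new offset 104.  The
 * read-side helper above handles the mirror-image case.
 */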

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
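
/*
 * Layout note (illustrative numbers; a PAGE_SIZE of 4096 is an
 * assumption for x86): for a 16384-byte buffer, the first 4096 bytes
 * hold the struct hv_ring_buffer header (indices, interrupt_mask,
 * feature bits) and ring_datasize becomes 12288 bytes of payload space.
 */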

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct kvec *kv_list, u32 kv_count, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only room for the packet, treat the ring as full.
	 * Otherwise, on the next write we would think the ring buffer
	 * is empty since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);

	return 0;
}
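
/*
 * Usage sketch (illustrative only; "desc", "payload" and the call to
 * vmbus_setevent() are assumptions about the caller, not part of this
 * file).  The descriptor and payload are gathered into a kvec list and
 * the host is signaled only when hv_need_to_signal() asked for it:
 *
 *	struct kvec kv[2];
 *	bool signal;
 *
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, kv, 2, &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);
 */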

/*
 * Common read path for hv_ringbuffer_peek() and hv_ringbuffer_read():
 * copy buflen bytes starting at offset bytes past the read index and,
 * if advance is true, move the read index past the packet and decide
 * whether the (possibly blocked) writer needs to be signaled.
 */
static inline int __hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
				       void *buffer, u32 buflen, u32 offset,
				       bool *signal, bool advance)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	if (!advance)
		goto out_unlock;

	/* Consume the trailing u64 that records the previous packet start */
	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

out_unlock:
	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
	return 0;
}

/* Read from ring buffer without advancing the read index. */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen)
{
	return __hv_ringbuffer_read(inring_info, buffer, buflen,
				    0, NULL, false);
}

/* Read from ring buffer and advance the read index. */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 offset,
		       bool *signal)
{
	return __hv_ringbuffer_read(inring_info, buffer, buflen,
				    offset, signal, true);
}
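
/*
 * Usage sketch (illustrative only; the two-step peek-then-read pattern
 * and the vmpacket_descriptor field arithmetic are assumptions about
 * the callers, not guarantees made by this file):
 *
 *	struct vmpacket_descriptor desc;
 *	bool signal;
 *
 *	ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *	if (ret == 0)
 *		ret = hv_ringbuffer_read(&channel->inbound, buffer,
 *					 (desc.len8 << 3) - (desc.offset8 << 3),
 *					 desc.offset8 << 3, &signal);
 */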