ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

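/*
 *
 * hv_begin_read() / hv_end_read()
 *
 * Bracket the draining of an inbound ring buffer. hv_begin_read() sets
 * interrupt_mask so the host stops interrupting us while we are already
 * processing; hv_end_read() clears the mask and returns the number of
 * bytes that became readable in the meantime, so the caller can catch
 * data that raced with the unmask.
 */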
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	smp_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
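
/*
 * A typical consumer would bracket its drain loop with this pair.
 * Illustrative sketch only; process_pending_packets() is a hypothetical
 * helper, not part of this file:
 *
 *	hv_begin_read(rbi);
 *	do {
 *		process_pending_packets(rbi);
 *	} while (hv_end_read(rbi) != 0);
 *
 * If hv_end_read() returns nonzero, data arrived while the interrupt
 * mask was set, so the caller must loop and process again rather than
 * wait for an interrupt the host will not send.
 */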

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (for example, a packet descriptor).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				 u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as u64 of the specified ring buffer
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
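
/*
 * Note that only the write index is actually packed: it lands in the
 * upper 32 bits of the u64 and the lower 32 bits, where the read index
 * would go, are left zero. For example, write_index == 0x830 yields
 * 0x0000083000000000. This value is appended after each packet as the
 * "previous indices" trailer that hv_ringbuffer_read() strips off below.
 */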

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough data available. Handles wrap-around
 * of the source (ring) side only!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * destination (ring) side only!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
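
/*
 * Both copy helpers share the same wrap-around arithmetic. As a worked
 * example for hv_copyto_ringbuffer(): with ring_buffer_size == 4096,
 * start_write_offset == 4000 and srclen == 200, the condition
 * 200 > 4096 - 4000 holds, so frag_len == 96: the first 96 bytes land
 * at offsets 4000..4095 and the remaining 104 bytes at offsets 0..103.
 * The returned offset is (4000 + 200) % 4096 == 104.
 */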

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}


/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
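
/*
 * A minimal usage sketch (illustrative only; the real allocation policy
 * lives in the channel setup code): allocate a span of pages, the first
 * of which becomes the struct hv_ring_buffer header.
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, 2);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	if (hv_ringbuffer_init(&rbi, ring, 4 * PAGE_SIZE))
 *		return -EINVAL;
 *
 * After this, rbi.ring_datasize is 4 * PAGE_SIZE minus the one-page
 * header, and both indices start at zero.
 */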

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i) {
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Make sure we flush all writes before updating the write index */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	return 0;
}
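
/*
 * Callers hand the payload in as a scatterlist. A sketch, illustrative
 * only, assuming "outbound" points at the channel's outbound
 * hv_ring_buffer_info and desc/payload are the caller's packet pieces:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &desc, sizeof(desc));
 *	sg_set_buf(&sg[1], payload, payload_len);
 *	ret = hv_ringbuffer_write(outbound, sg, 2);
 *
 * -EAGAIN means the ring was too full to take the packet plus its
 * 8-byte indices trailer; the caller decides whether to retry.
 */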


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	/* buflen is unsigned, so only zero is invalid */
	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
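
/*
 * hv_ringbuffer_peek() and hv_ringbuffer_read() are designed to be used
 * together: peek the fixed-size descriptor first, then consume the whole
 * packet, using @offset to skip the header bytes. A sketch, illustrative
 * only, assuming "inbound" points at the channel's inbound
 * hv_ring_buffer_info (struct vmpacket_descriptor comes from the
 * Hyper-V headers):
 *
 *	struct vmpacket_descriptor desc;
 *	u32 packetlen, userlen;
 *
 *	if (hv_ringbuffer_peek(inbound, &desc, sizeof(desc)))
 *		return;
 *	packetlen = desc.len8 << 3;
 *	userlen = packetlen - (desc.offset8 << 3);
 *	hv_ringbuffer_read(inbound, buf, userlen, desc.offset8 << 3);
 *
 * The trailing u64 of previous indices is consumed by
 * hv_ringbuffer_read() itself and never surfaces to the caller.
 */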