/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

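/*
 * Each packet is followed by an 8-byte trailer: the 64-bit index value
 * that hv_ringbuffer_write() appends and __hv_pkt_iter_next() skips.
 */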
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

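	/*
	 * Ensure the caller's update of write_index is visible before we
	 * read interrupt_mask below.
	 */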
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the ring buffer indices as a u64 (write index in the upper 32 bits). */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around in the destination
 * case only.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

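	/*
	 * The data pages are mapped twice (see hv_ringbuffer_init()), so a
	 * copy that runs past the end of the ring lands in the aliased
	 * second mapping and a single memcpy suffices.
	 */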
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

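	/*
	 * Example: with dsize = 4096, read_loc = 100 and write_loc = 300,
	 * 3896 bytes are free to write and 200 bytes remain to be read.
	 */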
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

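	/* The wraparound mapping below assumes a single-page ring header. */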
	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
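	/*
	 * Example: with page_cnt = 4 the vmap order is
	 * header, data1, data2, data3, data1, data2, data3.
	 */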

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
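	/* (value = 1 sets feat_pending_send_sz, tested in hv_pkt_iter_close().) */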
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Clean up the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
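	/*
	 * totalbytes_towrite starts at sizeof(u64) to reserve room for
	 * the prev_indices trailer appended below.
	 */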
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, treat the ring as full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set even when there is no header at all;
		 * drivers are expected to check buffer_actual_len.
		 */
		return 0;
	}

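	/*
	 * offset8 and len8 are in units of 8 bytes; in raw mode the copy
	 * below includes the packet descriptor itself.
	 */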
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* Since the ring is double-mapped, only one copy is necessary. */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

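	/*
	 * Example: with ring_datasize = 4096, priv_read_loc = 3000 and
	 * write_loc = 1000, (4096 - 3000) + 1000 = 2096 bytes are available.
	 */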
	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no further action is needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	/* desc cannot be NULL here; prefetch the region just past this packet. */
	prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get the next vmbus packet from the ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If there is no more data, returns NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
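
/*
 * Typical use of the iterator API (a sketch; process_pkt() stands in
 * for a driver's packet handler):
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	for (desc = hv_pkt_iter_first(channel); desc;
 *	     desc = __hv_pkt_iter_next(channel, desc))
 *		process_pkt(desc);
 *	hv_pkt_iter_close(channel);
 */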

/*
 * Update the ring buffer's read index and, if needed, signal the host
 * after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 orig_write_sz = hv_get_bytes_to_write(rbi);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the read of pending_send_sz (below) were reordered before the
	 * commit of the new read index (above), we could have a problem.
	 * If the host were to set pending_send_sz after we have sampled it,
	 * and then go to sleep before we commit the read index, we could
	 * miss sending the interrupt. Issue a full memory barrier to
	 * address this.
	 */
	virt_mb();

	/* If host has disabled notifications then skip */
	if (rbi->ring_buffer->interrupt_mask)
		return;

	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
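		/*
		 * pending_send_sz is the amount of free space the host
		 * wants to see before it is signaled that its blocked
		 * send can make progress.
		 */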

		/*
		 * If there was space before we began iteration, then the
		 * host was not blocked. This also handles the case where
		 * pending_sz is zero: the host has nothing pending and
		 * does not need to be signaled.
		 */
		if (orig_write_sz > pending_sz)
			return;

		/* If pending write will not fit, don't give false hope. */
		if (hv_get_bytes_to_write(rbi) < pending_sz)
			return;
	}

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);