/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the write index of the ring buffer as the upper 32 bits of a u64. */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. Handles wrap-around of the destination
 * only: because the ring is double mapped, the memcpy itself may safely
 * run past the end of the buffer, and the returned write offset is
 * wrapped back into range.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
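
/*
 * Worked example for hv_copyto_ringbuffer() above (illustrative values
 * only, assuming a hypothetical 4-page ring, i.e. ring_datasize ==
 * 0x3000): a copy of srclen == 0x20 starting at start_write_offset ==
 * 0x2ff0 runs into the aliased second mapping of the data pages, which
 * is safe, and the returned offset is 0x2ff0 + 0x20 - 0x3000 == 0x10,
 * i.e. wrapped back to the start of the ring data.
 */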

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
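
/*
 * Illustrative sketch of the wraparound mapping built in
 * hv_ringbuffer_init() above, assuming a hypothetical page_cnt of 4
 * (one header page "hdr" plus three data pages d1..d3):
 *
 *	pages_wraparound[] = { hdr, d1, d2, d3, d1, d2, d3 }
 *
 * That is 2 * 4 - 1 == 7 entries: the data pages are mapped twice in a
 * row, so a copy that runs past the end of the ring lands back at the
 * start of the ring data without any explicit wrap handling.
 */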

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}
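	/*
	 * Worked example of the check above: if exactly totalbytes_towrite
	 * bytes were free and we wrote them all, write_index would advance
	 * until it equals read_index, which is indistinguishable from an
	 * empty ring; so the ring is treated as full unless at least one
	 * spare byte would remain after the write.
	 */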

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
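
/*
 * Illustrative sketch of a hv_ringbuffer_write() caller, loosely
 * modelled on vmbus_sendpacket(); the names "desc", "payload" and
 * "payload_len" are hypothetical and only for illustration:
 *
 *	struct kvec kv[2] = {
 *		{ .iov_base = &desc,   .iov_len = sizeof(desc) },
 *		{ .iov_base = payload, .iov_len = payload_len  },
 *	};
 *	ret = hv_ringbuffer_write(channel, kv, 2);
 *
 * On the ring, the kvec contents are followed by an extra u64,
 * prev_indices (the 8-byte VMBUS_PKT_TRAILER), recording the write
 * index at which this packet started.
 */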

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are supposed to analyze buffer_actual_len.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
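
/*
 * Worked example of the length math in hv_ringbuffer_read() above: for
 * a packet with desc->offset8 == 2 and desc->len8 == 10 (both in units
 * of 8 bytes), a non-raw read skips the 16-byte header (offset8 << 3)
 * and copies 10 * 8 - 16 == 64 bytes of payload into the caller's
 * buffer, while a raw read copies the full 80 bytes starting at the
 * descriptor itself.
 */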

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
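
/*
 * Worked example for hv_pkt_iter_avail() above, assuming ring_datasize
 * == 0x3000: with priv_read_index == 0x2f00 and write_index == 0x0100
 * the writer has wrapped past the end of the ring, so the bytes still
 * unread by this iterator are (0x3000 - 0x2f00) + 0x0100 == 0x0200.
 */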

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* set state for later hv_pkt_iter_close */
	rbi->cached_read_index = rbi->ring_buffer->read_index;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If no more packets are available, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;
	else
		return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 cur_write_sz, cached_write_sz;
	u32 pending_sz;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If the read of pending_send_sz were reordered ahead of the
	 * read_index update above, we could have a problem: the host could
	 * set pending_send_sz after we sampled it, see the stale read_index,
	 * conclude the ring is still full and go to sleep; we would then
	 * never send the interrupt it is waiting for. The full memory
	 * barrier prevents this reordering.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz < pending_sz)
		return;

	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
	if (cached_write_sz < pending_sz)
		vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
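
/*
 * Illustrative sketch of how a VMBus driver can consume packets with
 * the iterator exported above. Only hv_pkt_iter_first(),
 * __hv_pkt_iter_next() and hv_pkt_iter_close() are real; the
 * handle_packet() callback is hypothetical:
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	for (desc = hv_pkt_iter_first(channel); desc;
 *	     desc = __hv_pkt_iter_next(channel, desc))
 *		handle_packet(channel, desc);
 *
 *	hv_pkt_iter_close(channel);	// commit read_index, signal host if needed
 */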