/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

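	/*
	 * Ensure that our prior ring-buffer writes, including the
	 * write_index update, are visible before interrupt_mask is
	 * sampled below.
	 */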
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
		     u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by 'offset' bytes.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	if (next >= ring_info->ring_datasize)
		next -= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
		    u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the write index of the specified ring buffer as a u64. The write
 * index is placed in the upper 32 bits; the lower 32 bits (where the
 * read index would go) are left as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source (ring) side only!
 */
static u32 hv_copyfrom_ringbuffer(
	const struct hv_ring_buffer_info *ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

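	/*
	 * A single memcpy is safe even when the copy crosses the end of
	 * the ring: hv_ringbuffer_init() maps the data pages twice, back
	 * to back, so the ring contents appear linearly contiguous.
	 */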
	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	if (start_read_offset >= ring_buffer_size)
		start_read_offset -= ring_buffer_size;

	return start_read_offset;
}


/*
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * destination (ring) side only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
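	/*
	 * The data pages are mapped twice, back to back, so a read or
	 * write that crosses the end of the ring can still be done with
	 * a single linear memcpy (see hv_copyfrom_ringbuffer() and
	 * hv_copyto_ringbuffer()).
	 */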
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);


	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

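	/* Reserve room for the trailing u64 that records the ring indices. */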
	totalbytes_towrite += sizeof(u64);

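	/* Serialize with any other writers to this channel's outbound ring. */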
	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is exactly enough room for the packet, treat the ring
	 * as full: filling it completely would make write_index equal
	 * read_index, which is indistinguishable from an empty ring.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     kv_list[i].iov_base,
						     kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);


	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

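	/*
	 * The channel could have been rescinded while the write was in
	 * flight; report -ENODEV so the caller does not keep retrying.
	 */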
	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (!buflen)
		return -EINVAL;


	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set when there is even no header, drivers are
		 * supposed to analyze buffer_actual_len.
		 */
		return ret;
	}

	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	/* offset8 and len8 in the descriptor are in units of 8 bytes */
	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

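	/* The full packet is not yet in the ring; let the caller retry. */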
	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						packetlen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return ret;
}