/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

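/**
 * iio_buffer_init() - initialise the basic fields of an iio_buffer
 * @buffer: buffer being set up
 *
 * Initialises the demux list and the poll waitqueue; buffer
 * implementations call this before handing the buffer to the core.
 */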
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

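/**
 * iio_buffer_register() - build the buffer's scan_elements sysfs group
 * @indio_dev: the iio device the buffer is attached to
 * @channels: channel specs to generate scan element attributes from
 * @num_channels: number of entries in @channels
 *
 * Creates the index/type/en attributes for each channel with a valid
 * scan_index, sizes the device masklength, allocates the scan mask and
 * hooks the resulting attribute groups into the device so they are picked
 * up when the device itself is registered.
 */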
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

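/**
 * iio_buffer_unregister() - release resources taken by iio_buffer_register()
 * @indio_dev: the iio device whose buffer attributes are being torn down
 *
 * Frees the scan mask, the scan_elements attribute array and the
 * dynamically created per-channel attributes.
 */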
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

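/*
 * Helpers backing the buffer/length sysfs attribute: reading reports the
 * current buffer length, writing resizes the buffer provided it is not
 * currently enabled.
 */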
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	ulong val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

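/*
 * Writing a non-zero value to buffer/enable brings the buffer up: the
 * setup_ops preenable/postenable callbacks are run around the switch to
 * triggered or hardware buffering. Writing zero tears the buffer back
 * down to direct mode via predisable/postdisable.
 */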
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

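/*
 * Work out how large one scan will be once demuxed: each enabled channel
 * contributes storagebits/8 bytes, naturally aligned, with the timestamp
 * (if requested) appended last.
 */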
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

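/*
 * Generic preenable helper for software buffers: computes the scan size,
 * tells the buffer implementation how many bytes each datum needs, picks
 * the active scan mask and rebuilds the demux table before capture starts.
 */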
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);
	buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

	/* What scan mask do we actually have ?*/
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;

	if (indio_dev->active_scan_mask == NULL)
		return -EINVAL;

	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

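/* Report whether @bit is set in the buffer's scan mask, -EINVAL if out of range. */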
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

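/*
 * Apply the demux table to a raw scan: returns the bounce buffer holding
 * the repacked data, or the input unchanged when no demuxing is needed.
 */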
static unsigned char *iio_demux(struct iio_buffer *buffer,
				 unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

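/**
 * iio_push_to_buffer() - demux a scan and hand it to the buffer
 * @buffer: buffer the data is being pushed to
 * @data: raw scan as captured by the driver
 * @timestamp: timestamp to store alongside the data
 */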
int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);

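/* Drop and free every entry currently on the buffer's demux list. */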
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

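/*
 * Rebuild the demux table that maps the device's active scan layout onto
 * the layout the buffer asked for, allocating a bounce buffer when the
 * two masks differ.
 */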
int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);