/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

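/* Check whether a buffer is currently on the device's list of enabled buffers. */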
static bool iio_buffer_is_active(struct iio_dev *indio_dev,
				 struct iio_buffer *buf)
{
	struct list_head *p;

	list_for_each(p, &indio_dev->buffer_list)
		if (p == &buf->buffer_list)
			return true;

	return false;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
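 * @filp:	file structure pointer for the buffer's chrdev
 * @buf:	user-space destination for the data read
 * @n:		maximum number of bytes to read
 * @f_ps:	file position (unused here)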
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

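/*
 * sysfs show callback for a scan element's "en" attribute: reports whether
 * the channel's bit is currently set in the buffer's scan mask.
 */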
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

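/* Clear a single channel's bit in the buffer's scan mask. */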
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

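/*
 * Create the scan_elements attributes (index, type and en) for one channel.
 * Returns the number of attributes added, or a negative error code.
 */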
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

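/**
 * iio_buffer_register() - set up the scan_elements sysfs group for a buffer
 * @indio_dev:		the iio device
 * @channels:		channel specs from which the scan elements are built
 * @num_channels:	number of entries in @channels
 */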
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

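/**
 * iio_buffer_unregister() - free the resources allocated by iio_buffer_register()
 * @indio_dev:	the iio device
 */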
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

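/*
 * sysfs store callback for a buffer's length attribute.  Changing the length
 * is refused while the buffer is active.
 */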
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev, indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n",
		       iio_buffer_is_active(indio_dev,
					    indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, since a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

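/*
 * Work out how many bytes one demuxed scan occupies, honouring each enabled
 * channel's natural alignment and the optional trailing timestamp.
 */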
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

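/**
 * iio_update_buffers() - add and/or remove a buffer from the set of enabled buffers
 * @indio_dev:		the iio device
 * @insert_buffer:	buffer to enable, may be NULL
 * @remove_buffer:	buffer to disable, may be NULL
 *
 * Winds down anything currently running, updates the buffer list, recomputes
 * the compound scan mask and demux tables, and then brings the device back up.
 */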
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed\n");
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed\n");
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "update scan mode failed\n");
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed\n");
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		list_del(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *pbuf = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev, pbuf);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
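/*
 * Usage sketch (hypothetical driver; the "foo" names are illustrative only):
 * a driver that can sample only one channel at a time would hook this helper
 * up through its buffer setup ops, e.g.
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.preenable		= &iio_sw_buffer_preenable,
 *		.validate_scan_mask	= &iio_validate_scan_mask_onehot,
 *	};
 *
 * The core then consults the callback from iio_validate_scan_mask() below
 * whenever a scan element is enabled.
 */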

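/* Defer to the driver's validate_scan_mask callback, if one is provided. */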
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
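 * @indio_dev: the iio device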
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN_ON("trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				 unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

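/* Demux one scan of raw data and store it in a single buffer. */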
static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

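/* Free every entry on the buffer's demux list. */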
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}


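/**
 * iio_push_to_buffers() - push one scan to all buffers enabled on the device
 * @indio_dev:	the iio device
 * @data:	a complete scan laid out according to the active scan mask
 */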
int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

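/*
 * Rebuild the demux table that maps samples from the device's compound
 * (active) scan mask layout into this buffer's own scan mask layout.
 */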
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

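/* Rebuild the demux tables of every buffer currently enabled on the device. */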
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);