/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

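/*
 * Returns true if the buffer has data ready to be read. Implementations
 * that provide a data_available() callback are asked directly; otherwise
 * fall back on the stufftoread flag.
 */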
static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the buffer waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

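/**
 * iio_buffer_init() - initialize an iio_buffer structure
 * @buffer: buffer to be initialized
 *
 * Initializes the demux and buffer list heads, the poll waitqueue and the
 * reference count. Buffer implementations call this before handing the
 * buffer to the core.
 */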
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

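/**
 * iio_buffer_register() - register the buffer sysfs interface for a device
 * @indio_dev: the iio device the buffer is attached to
 * @channels: channel specification of the device
 * @num_channels: number of entries in @channels
 *
 * Builds the scan_elements attribute group, allocates the scan mask and
 * adds the per-channel index/type/en attributes for every channel with a
 * valid scan index.
 */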
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

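/**
 * iio_buffer_unregister() - free the resources allocated by iio_buffer_register()
 * @indio_dev: the iio device whose buffer attributes are to be released
 */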
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

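/**
 * iio_buffer_read_length() - sysfs show callback reporting the current
 *			      buffer length
 */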
ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

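/**
 * iio_buffer_write_length() - sysfs store callback updating the buffer length
 *
 * The new length is passed to the buffer's set_length() callback; the
 * request is rejected with -EBUSY while the buffer is active.
 */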
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

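/**
 * iio_buffer_show_enable() - sysfs show callback reporting whether the
 *			      buffer is currently enabled
 */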
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator here, as a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

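/*
 * Work out how many bytes one demuxed scan occupies for the given scan
 * mask, honouring each channel's storage size and natural alignment, with
 * an optional timestamp appended at the end.
 */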
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

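/**
 * iio_disable_all_buffers() - force all buffers on a device out of the active list
 * @indio_dev: the iio device
 *
 * Runs the predisable/postdisable callbacks, deactivates every buffer on
 * the list and drops the device back into direct mode. Used when the
 * device is going away while buffers are still active.
 */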
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

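/*
 * Tear down the current buffer configuration, apply the requested insert /
 * remove, recompute the compound scan mask and bring the device back up
 * again. Called with mlock held; on failure the previous configuration is
 * restored where possible.
 */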
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}

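/**
 * iio_update_buffers() - add and/or remove a buffer from the active set
 * @indio_dev: the iio device
 * @insert_buffer: buffer to insert, may be NULL
 * @remove_buffer: buffer to remove, may be NULL
 *
 * Note that this tears down all buffering and builds it up again.
 */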
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

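/**
 * iio_buffer_store_enable() - sysfs store callback enabling or disabling
 *			       the device's default buffer
 */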
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

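/**
 * iio_scan_mask_query() - query whether a given bit is set in a buffer's scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer to query
 * @bit: the scan index to query
 *
 * Returns 0 or 1 for the state of the bit, or a negative error code.
 */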
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

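/*
 * Copy the samples this buffer is interested in out of the raw scan using
 * the buffer's demux table. If no demuxing is needed the incoming data is
 * returned untouched.
 */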
static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

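/**
 * iio_push_to_buffers() - push a scan to all buffers attached to a device
 * @indio_dev: the iio device
 * @data: pointer to one full scan of sample data
 *
 * The data is demuxed and stored into every buffer on the device's active
 * buffer list, stopping at the first error.
 */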
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

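/*
 * Rebuild the demux table for one buffer: compare the device's active scan
 * mask with the buffer's own scan mask and record the memcpy ranges needed
 * to pick out just the channels this buffer wants, allocating a bounce
 * buffer for the demuxed scan.
 */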
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

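/**
 * iio_update_demux() - rebuild the demux tables of all active buffers
 * @indio_dev: the iio device
 *
 * On failure the demux tables of every buffer on the list are freed.
 */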
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);