/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for data read from the iio buffer
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: number of bytes read on success, or a negative error code.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	do {
		if (!iio_buffer_data_available(rb)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(rb->pollq,
					iio_buffer_data_available(rb) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (POLLIN | POLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the buffer waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Build the scan element sysfs attributes for each channel */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator, since a NULL mask makes no sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
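
/*
 * Example (hypothetical driver, masklength <= BITS_PER_LONG): a device that
 * can only capture channels 0+1 together or channels 0-3 together would
 * advertise the supported combinations as
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *	indio_dev->available_scan_masks = my_scan_masks;
 *
 * The list is terminated by an empty mask, which is what the
 * while (*av_masks) walk above relies on.  "my_scan_masks" is an
 * illustrative name, not something defined in this file.
 */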

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
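
/*
 * Worked example for the arithmetic above (hypothetical scan): one 12-bit
 * channel stored in 16 bits followed by the 64-bit timestamp gives
 *
 *	bytes = ALIGN(0, 2) + 2 = 2	(sample)
 *	bytes = ALIGN(2, 8) + 8 = 16	(timestamp, aligned to its 8 bytes)
 *
 * so each scan occupies 16 bytes even though only 10 of them carry data.
 */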

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				return ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				return ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
588
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			}
			else {
				kfree(compound_mask);
				ret = -EINVAL;
				return ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
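
/*
 * Sketch of an in-kernel consumer (names hypothetical): a consumer that owns
 * its own struct iio_buffer attaches it for the duration of a capture with
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	(attach)
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);		(detach)
 *
 * The sysfs enable path below uses the same __iio_update_buffers() helper,
 * with indio_dev->buffer as the buffer being inserted or removed.
 */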

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
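
/*
 * Example (hypothetical driver): a device that can only sample one channel
 * at a time plugs this helper into its buffer setup ops,
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.postenable = my_postenable,
 *		.predisable = my_predisable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 * iio_validate_scan_mask() below then rejects any trial mask with more than
 * one bit set before it can be applied.
 */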

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
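
/*
 * Worked example (hypothetical layouts): if the device's active scan carries
 * two 16-bit channels plus the 64-bit timestamp, but a given buffer only
 * enabled the second channel and the timestamp, the demux list built below
 * ends up with roughly
 *
 *	{ .from = 2, .to = 0, .length = 2 }	sample of the enabled channel
 *	{ .from = 8, .to = 8, .length = 8 }	timestamp
 *
 * so iio_demux() copies only the requested sample and the timestamp out of
 * each full device scan into the buffer's own layout.
 */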

static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}


int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
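
/*
 * Sketch of the usual producer side (names hypothetical): a triggered-buffer
 * driver fills a scan-sized bounce buffer in its pollfunc and hands it to
 * every attached buffer in one call:
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *
 *		my_read_scan(indio_dev, my_scan_data);
 *		iio_push_to_buffers(indio_dev, my_scan_data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */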

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
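
/*
 * Example: code that keeps a long-lived pointer to a buffer pairs the two
 * helpers above, e.g.
 *
 *	my_state->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(my_state->buffer);
 *
 * "my_state" is a hypothetical consumer-private structure; the buffer is
 * only released once the last such reference has been dropped.
 */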