/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

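/**
 * iio_buffer_init() - initialize the common fields of an iio_buffer
 * @buffer: buffer structure embedded in the implementation's own state
 *
 * Sets up the demux list, the buffer_list hook used to attach the buffer
 * to a device, and the poll wait queue. Buffer implementations are
 * expected to call this from their allocation routine before handing the
 * buffer to the core.
 */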
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

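/*
 * Store callback for a channel's "en" attribute under scan_elements: parse
 * a boolean and set or clear the corresponding bit in the buffer's scan
 * mask. The change is refused with -EBUSY while the buffer is active.
 */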
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

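/*
 * Create the scan_elements attributes for one channel: "index" (position
 * in the scan), "type" (endianness/sign/bit layout) and "en" (the enable
 * bit, or the timestamp enable for the timestamp channel). Returns the
 * number of attributes created or a negative error code.
 */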
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator here, as it never makes sense
 * as a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

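/*
 * Work out how many bytes one demuxed scan occupies for the given mask:
 * each enabled channel contributes storagebits/8 bytes, naturally aligned
 * to its own size, with the (optional) timestamp last. For example, a
 * 16-bit channel followed by a 64-bit timestamp packs as 2 bytes, padding
 * to an 8 byte boundary, then 8 bytes: 16 bytes in total.
 */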
static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

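/*
 * Attach and/or detach a buffer from the device's list of active buffers.
 * The sequence mirrors the code below: wind down anything currently
 * running (predisable/postdisable), update the buffer list, recompute the
 * compound scan mask from all attached buffers, then wind everything up
 * again (preenable, request_update, update_scan_mode, postenable). On
 * failure the previous scan mask is restored where possible.
 */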
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

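/*
 * Store callback normally wired up to the buffer/enable sysfs attribute:
 * parse a boolean and attach or detach the device's own buffer via
 * iio_update_buffers().
 */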
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

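/*
 * Query whether a given scan element bit is currently set in a buffer's
 * scan mask; returns 0 or 1, or -EINVAL for an out of range bit.
 */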
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				 unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}


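/*
 * Push one scan of data to every buffer attached to the device, applying
 * each buffer's demux table on the way. Drivers typically call this from
 * their trigger or interrupt handler with a buffer of
 * indio_dev->scan_bytes bytes laid out in the device's active scan order.
 */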
int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

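/*
 * Build the demux table for one buffer. If the buffer wants a strict
 * subset of the channels the device actually captures, a list of
 * from/to/length memcpy operations (plus a bounce buffer) is constructed
 * so that iio_demux() can repack each scan; if the masks are identical no
 * table is needed and the data is passed through untouched. As an
 * illustration, with channels 0, 1 and 2 captured but only 0 and 2
 * requested, the table copies channel 0 to the start and channel 2 just
 * after it (subject to alignment), skipping channel 1 in the source.
 */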
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);