industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
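
/*
 * Example (illustrative, not part of this file): once a buffer has been
 * enabled, user space consumes the character device backed by
 * iio_buffer_read_first_n_outer() and iio_buffer_poll() with an ordinary
 * poll()+read() loop.  The device path below is an assumption for a
 * hypothetical first IIO device.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	char buf[256];
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		read(fd, buf, sizeof(buf));
 */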

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
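
/*
 * Example (illustrative sketch): buffer implementations embed a struct
 * iio_buffer as their first member (see iio_buffer_read_first_n_outer()
 * above) and initialise it with iio_buffer_init().  "my_buffer" and
 * "my_buffer_alloc" are hypothetical names.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;
 *		void *priv;
 *	};
 *
 *	struct iio_buffer *my_buffer_alloc(void)
 *	{
 *		struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *
 *		if (!b)
 *			return NULL;
 *		iio_buffer_init(&b->buffer);
 *		return &b->buffer;
 *	}
 */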

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							 &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
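
/*
 * Example (illustrative sketch): a driver describes its scan elements via
 * scan_index and scan_type in its channel array and passes that array here;
 * the "index", "type" and "en" attributes above are generated from it.
 * "my_channels" is a hypothetical channel table.
 *
 *	static const struct iio_chan_spec my_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 's',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.shift = 4,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 *	ret = iio_buffer_register(indio_dev, my_channels,
 *				  ARRAY_SIZE(my_channels));
 */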

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
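
/*
 * Example (illustrative sketch): buffer implementations typically expose the
 * helpers above (and iio_buffer_store_enable() below) as the standard
 * "length" and "enable" sysfs attributes:
 *
 *	static DEVICE_ATTR(length, S_IRUGO | S_IWUSR,
 *			   iio_buffer_read_length, iio_buffer_write_length);
 *	static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
 *			   iio_buffer_show_enable, iio_buffer_store_enable);
 *
 *	static struct attribute *my_buffer_attrs[] = {
 *		&dev_attr_length.attr,
 *		&dev_attr_enable.attr,
 *		NULL,
 *	};
 */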

/* Note NULL is used as the error indicator, since a NULL mask doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					  unsigned int masklength,
					  const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
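
/*
 * Example (illustrative): available_scan_masks is a flat array of bitmaps,
 * each BITS_TO_LONGS(masklength) longs wide, terminated by an empty mask.
 * For a hypothetical device with masklength <= BITS_PER_LONG that can only
 * stream channel pairs:
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),
 *		BIT(2) | BIT(3),
 *		0,
 *	};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */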

static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
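
/*
 * Worked example: each sample is aligned to its own storage size, so one
 * enabled 16-bit channel plus the 64-bit timestamp gives
 *
 *	bytes = ALIGN(0, 2) + 2 = 2
 *	bytes = ALIGN(2, 8) + 8 = 16
 *
 * i.e. scan_bytes ends up 16, not 10.
 */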

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
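
/*
 * Example (illustrative sketch): in-kernel consumers attach or detach an
 * extra buffer by passing it as insert_buffer or remove_buffer; the core
 * then re-runs the setup_ops sequence above.  "cb_buffer" is a hypothetical
 * additional buffer.
 *
 *	ret = iio_update_buffers(indio_dev, cb_buffer, NULL);
 *	...
 *	ret = iio_update_buffers(indio_dev, NULL, cb_buffer);
 */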

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
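
/*
 * Example (illustrative sketch): drivers hook these helpers up through their
 * iio_buffer_setup_ops.  A device that can only sample one channel at a time
 * might use something like:
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = &iio_sw_buffer_preenable,
 *		.validate_scan_mask = &iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */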

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
				 const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}


int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
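
/*
 * Example (illustrative sketch): a trigger handler fills one scan worth of
 * data (indio_dev->scan_bytes, laid out per the active scan mask) and hands
 * it to every attached buffer.  "my_trigger_handler" and "my_read_scan" are
 * hypothetical.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[32];
 *
 *		my_read_scan(indio_dev, data);
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */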

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);