/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		kfifo of detected events waiting to be read by userspace
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 */
struct iio_event_interface {
	wait_queue_head_t	wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head	dev_attr_list;
	unsigned long		flags;
	struct attribute_group	group;
};

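/**
 * iio_push_event() - try to add an event to the kfifo for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		packed event code identifying channel, type and direction
 * @timestamp:		timestamp of the event, in nanoseconds
 *
 * Events are only queued while the event chrdev is held open (busy bit set);
 * otherwise they are silently dropped.
 */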
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	struct iio_event_data ev;
	unsigned long flags;
	int copied;

	/* Does anyone care? */
	spin_lock_irqsave(&ev_int->wait.lock, flags);
	if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, &ev);
		if (copied != 0)
			wake_up_locked_poll(&ev_int->wait, POLLIN);
	}
	spin_unlock_irqrestore(&ev_int->wait.lock, flags);

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
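
/*
 * Illustrative only (not part of this file): a driver's threshold interrupt
 * handler might push an event to userspace roughly as below. The handler
 * name and the choice of channel 0 of an IIO_VOLTAGE channel are
 * hypothetical; iio_push_event(), IIO_UNMOD_EVENT_CODE() and
 * iio_get_time_ns() come from the IIO headers included above.
 *
 *	static irqreturn_t foo_threshold_irq(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns());
 *
 *		return IRQ_HANDLED;
 *	}
 */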

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	file structure pointer for the event chrdev
 * @wait:	poll table to register the wait queue on
 *
 * Return: (POLLIN | POLLRDNORM) if events are available for reading, 0 otherwise.
 */
static unsigned int iio_event_poll(struct file *filep,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int events = 0;

	poll_wait(filep, &ev_int->wait, wait);

	spin_lock_irq(&ev_int->wait.lock);
	if (!kfifo_is_empty(&ev_int->det_events))
		events = POLLIN | POLLRDNORM;
	spin_unlock_irq(&ev_int->wait.lock);

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	unsigned int copied;
	int ret;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	spin_lock_irq(&ev_int->wait.lock);
	if (kfifo_is_empty(&ev_int->det_events)) {
		if (filep->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto error_unlock;
		}
		/* Blocking on device; waiting for something to be there */
		ret = wait_event_interruptible_locked_irq(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events));
		if (ret)
			goto error_unlock;
		/* Single access device so no one else can get the data */
	}

	ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);

error_unlock:
	spin_unlock_irq(&ev_int->wait.lock);

	return ret ? ret : copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_event_interface *ev_int = indio_dev->event_interface;

	spin_lock_irq(&ev_int->wait.lock);
	__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
	/*
	 * In order to maintain a clean state for reopening,
	 * clear out any awaiting events. The busy bit cleared above
	 * prevents new iio_push_event() calls from queuing more.
	 */
	kfifo_reset_out(&ev_int->det_events);
	spin_unlock_irq(&ev_int->wait.lock);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read =  iio_event_chrdev_read,
	.poll =  iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

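/**
 * iio_event_getfd() - obtain an anonymous file descriptor for the event chrdev
 * @indio_dev:		IIO device the events belong to
 *
 * Only a single reader may hold the event descriptor at a time; further
 * attempts fail with -EBUSY until the descriptor is released.
 */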
int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_event_interface *ev_int = indio_dev->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	spin_lock_irq(&ev_int->wait.lock);
	if (__test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		spin_unlock_irq(&ev_int->wait.lock);
		return -EBUSY;
	}
	spin_unlock_irq(&ev_int->wait.lock);
	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
				indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		spin_lock_irq(&ev_int->wait.lock);
		__clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		spin_unlock_irq(&ev_int->wait.lock);
		iio_device_put(indio_dev);
	}
	return fd;
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
						  this_attr->address,
						  val);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val = indio_dev->info->read_event_config(indio_dev,
						     this_attr->address);

	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, ret;

	ret = indio_dev->info->read_event_value(indio_dev,
						this_attr->address, &val);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev, this_attr->address,
						 val);
	if (ret < 0)
		return ret;

	return len;
}

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
				      struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	u64 mask = 0;
	char *postfix;
	if (!chan->event_mask)
		return 0;

	for_each_set_bit(i, &chan->event_mask, sizeof(chan->event_mask)*8) {
		postfix = kasprintf(GFP_KERNEL, "%s_%s_en",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		if (chan->modified)
			mask = IIO_MOD_EVENT_CODE(chan->type, 0, chan->channel2,
						  i/IIO_EV_DIR_MAX,
						  i%IIO_EV_DIR_MAX);
		else if (chan->differential)
			mask = IIO_EVENT_CODE(chan->type,
					      0, 0,
					      i%IIO_EV_DIR_MAX,
					      i/IIO_EV_DIR_MAX,
					      0,
					      chan->channel,
					      chan->channel2);
		else
			mask = IIO_UNMOD_EVENT_CODE(chan->type,
						    chan->channel,
						    i/IIO_EV_DIR_MAX,
						    i%IIO_EV_DIR_MAX);

		ret = __iio_add_chan_devattr(postfix,
					     chan,
					     &iio_ev_state_show,
					     iio_ev_state_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
		postfix = kasprintf(GFP_KERNEL, "%s_%s_value",
				    iio_ev_type_text[i/IIO_EV_DIR_MAX],
				    iio_ev_dir_text[i%IIO_EV_DIR_MAX]);
		if (postfix == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		ret = __iio_add_chan_devattr(postfix, chan,
					     iio_ev_value_show,
					     iio_ev_value_store,
					     mask,
					     0,
					     &indio_dev->dev,
					     &indio_dev->event_interface->
					     dev_attr_list);
		kfree(postfix);
		if (ret)
			goto error_ret;
		attrcount++;
	}
	ret = attrcount;
error_ret:
	return ret;
}
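
/*
 * Illustrative only: a driver requests these event attributes for a channel
 * by setting bits in the event_mask of its struct iio_chan_spec, e.g.
 *
 *	.event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
 *
 * Each set bit i decodes as type = i / IIO_EV_DIR_MAX and
 * direction = i % IIO_EV_DIR_MAX, so the bit above produces the
 * "thresh_rising_en" and "thresh_rising_value" postfixes, which
 * __iio_add_chan_devattr() combines with the channel naming (typically
 * giving something like "in_voltage0_thresh_rising_en" in sysfs).
 */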

static inline void __iio_remove_event_config_attrs(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	list_for_each_entry_safe(p, n,
				 &indio_dev->event_interface->
				 dev_attr_list, l) {
		kfree(p->dev_attr.attr.name);
		kfree(p);
	}
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++)
		if (indio_dev->channels[j].event_mask != 0)
			return true;
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
}

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	indio_dev->event_interface =
		kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (indio_dev->event_interface == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	INIT_LIST_HEAD(&indio_dev->event_interface->dev_attr_list);

	iio_setup_ev_int(indio_dev->event_interface);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	indio_dev->event_interface->group.name = iio_event_group_name;
	indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
							  sizeof(indio_dev->event_interface->group.attrs[0]),
							  GFP_KERNEL);
	if (indio_dev->event_interface->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(indio_dev->event_interface->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(indio_dev->event_interface->group.attrs[0])
		       *attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p,
			    &indio_dev->event_interface->dev_attr_list,
			    l)
		indio_dev->event_interface->group.attrs[attrn++] =
			&p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] =
		&indio_dev->event_interface->group;

	return 0;

error_free_setup_event_lines:
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface);
error_ret:

	return ret;
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	if (indio_dev->event_interface == NULL)
		return;
	__iio_remove_event_config_attrs(indio_dev);
	kfree(indio_dev->event_interface->group.attrs);
	kfree(indio_dev->event_interface);
}