/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (raw->dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(raw->dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);

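/*
 * Example (illustrative only; the 900us figure is arbitrary): a driver
 * whose hardware measures durations itself could report one pulse
 * sample like this:
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = true;
 *	ev.duration = 900 * 1000;	(900 us, expressed in ns)
 *	ir_raw_event_store(dev, &ev);
 */
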
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);

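/*
 * Example (illustrative only; "level" is a hypothetical logical input
 * level sampled by the driver): hardware that only interrupts on signal
 * transitions reports which period is starting and lets rc-core derive
 * the duration of the period that just ended:
 *
 *	ir_raw_event_store_edge(dev, level ? IR_PULSE : IR_SPACE);
 */
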
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge().
 * It is intended for devices with a limited internal buffer. It automatically
 * merges consecutive samples of the same type and handles timeouts. Returns
 * non-zero if the event was added, and zero if the event was ignored due to
 * idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);

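/*
 * Example (illustrative only): a driver with a small hardware FIFO can
 * feed every sample through this filtering variant; consecutive samples
 * of the same type are merged before being handed to the decoders:
 *
 *	ret = ir_raw_event_store_with_filter(dev, &ev);
 *	(ret > 0: sample queued, ret == 0: ignored by idle processing)
 */
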
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

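/*
 * Example (illustrative only; read_hw_sample() is a hypothetical driver
 * helper): an interrupt handler typically stores a batch of samples and
 * wakes the decoding thread once at the end:
 *
 *	while (read_hw_sample(priv, &ev))
 *		ir_raw_event_store(dev, &ev);
 *	ir_raw_event_handle(dev);
 */
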
/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);

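/*
 * Example (illustrative only; the timings approximate RC-5 and the 14-bit
 * payload is arbitrary): encode a bi-phase frame into a caller-provided
 * buffer:
 *
 *	struct ir_raw_event buf[32], *e = buf;
 *	const struct ir_raw_timings_manchester t = {
 *		.leader		= 889000,	(all durations in ns)
 *		.clock		= 889000,
 *		.trailer_space	= 8890000,
 *	};
 *	int ret = ir_raw_gen_manchester(&e, ARRAY_SIZE(buf), &t, 14, 0x3000);
 *
 * On success, e points to the next free slot, so e - buf is the number of
 * events produced.
 */
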
/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);

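/*
 * Example (illustrative only; the timings roughly follow the NEC protocol
 * and the 32-bit payload is arbitrary): pulse-distance coding sends a
 * fixed-width pulse and varies the width of the following space per bit:
 *
 *	struct ir_raw_event buf[80], *e = buf;
 *	const struct ir_raw_timings_pd t = {
 *		.header_pulse	= 9000000,	(all durations in ns)
 *		.header_space	= 4500000,
 *		.bit_pulse	= 562500,
 *		.bit_space[0]	= 562500,
 *		.bit_space[1]	= 1687500,
 *		.trailer_pulse	= 562500,
 *		.trailer_space	= 5625000,
 *		.msb_first	= 0,
 *	};
 *	int ret = ir_raw_gen_pd(&e, ARRAY_SIZE(buf), &t, 32, 0x00ff00ff);
 */
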
/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);

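/*
 * Example (illustrative only; the timings roughly follow Sony SIRC and
 * the 12-bit payload is arbitrary): pulse-length coding keeps the space
 * fixed and varies the width of each pulse per bit:
 *
 *	struct ir_raw_event buf[32], *e = buf;
 *	const struct ir_raw_timings_pl t = {
 *		.header_pulse	= 2400000,	(all durations in ns)
 *		.bit_space	= 600000,
 *		.bit_pulse[0]	= 600000,
 *		.bit_pulse[1]	= 1200000,
 *		.trailer_space	= 6000000,
 *		.msb_first	= 0,
 *	};
 *	int ret = ir_raw_gen_pl(&e, ARRAY_SIZE(buf), &t, 12, 0x010);
 */
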
/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);

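/*
 * Example (illustrative only; protocol and scancode are arbitrary): ask
 * whichever registered encoder supports RC-5 to render a scancode, for
 * instance when a driver needs to program a hardware wakeup pattern:
 *
 *	struct ir_raw_event events[64];
 *	int count = ir_raw_encode_scancode(RC_TYPE_RC5, 0x1e01, events,
 *					   ARRAY_SIZE(events));
 *
 * A negative count means no compatible encoder was found or the buffer
 * was too small.
 */
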
/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;
	struct task_struct *thread;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	INIT_KFIFO(dev->raw->kfifo);

	/*
	 * raw transmitters do not need any event registration
	 * because the event is coming from userspace
	 */
	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
		thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
				     dev->minor);

		if (IS_ERR(thread)) {
			rc = PTR_ERR(thread);
			goto out;
		}

		dev->raw->thread = thread;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfree(dev->raw);
	dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

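/*
 * Example (illustrative only; the "foo" names are hypothetical): a decoder
 * module registers a handler at init time and removes it again on exit,
 * much as the in-tree ir-*-decoder modules do:
 *
 *	static struct ir_raw_handler foo_handler = {
 *		.protocols	= RC_BIT_OTHER,
 *		.decode		= ir_foo_decode,
 *	};
 *
 *	static int __init ir_foo_decode_init(void)
 *	{
 *		return ir_raw_handler_register(&foo_handler);
 *	}
 *
 *	static void __exit ir_foo_decode_exit(void)
 *	{
 *		ir_raw_handler_unregister(&foo_handler);
 *	}
 */
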
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);