/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/* OR of the protocol masks of all registered handlers; read via
 * ir_raw_get_allowed_protocols(), updated on handler (un)register.
 */
static atomic64_t available_protocols = ATOMIC64_INIT(0);

31
static int ir_raw_event_thread(void *data)
32
{
33
	struct ir_raw_event ev;
34
	struct ir_raw_handler *handler;
35 36 37
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (!kthread_should_stop()) {
38

39 40
		spin_lock_irq(&raw->lock);

41
		if (!kfifo_len(&raw->kfifo)) {
42
			set_current_state(TASK_INTERRUPTIBLE);
43

44 45 46 47 48 49
			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
50 51
		}

52 53
		if(!kfifo_out(&raw->kfifo, &ev, 1))
			dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
54
		spin_unlock_irq(&raw->lock);
55

56 57
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
58 59 60
			if (raw->dev->enabled_protocols & handler->protocols ||
			    !handler->protocols)
				handler->decode(raw->dev, ev);
61 62
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
63
	}
64 65

	return 0;
66 67 68 69
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
70
 * @dev:	the struct rc_dev device descriptor
71
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
72 73 74 75 76 77
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
78
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
79
{
80
	if (!dev->raw)
81 82
		return -EINVAL;

83
	IR_dprintk(2, "sample: (%05dus %s)\n",
84
		   TO_US(ev->duration), TO_STR(ev->pulse));
M
Maxim Levitsky 已提交
85

86 87 88 89
	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}
90

91 92 93
	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
94

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	/* gap threshold derived from the input layer's key-repeat delay */
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		/* the sample's duration is the time since the previous edge */
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		/* the state that just ENDED determines the sample type */
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		/* any pulse while idle wakes the receiver back up */
		ir_raw_event_set_idle(dev, false);

	/* Merge into the pending sample while the pulse/space type matches;
	 * flush the pending sample to the decoders when the type flips.
	 */
	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);

188
/**
189 190 191
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
192
 */
193
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
194
{
195
	if (!dev->raw)
196 197
		return;

198
	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
199 200

	if (idle) {
201 202 203
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
204
	}
205

206 207 208 209
	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
210 211 212
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

213 214
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
215
 * @dev:	the struct rc_dev device descriptor
216
 *
217
 * This routine will tell rc-core to start decoding stored ir data.
218
 */
219
void ir_raw_event_handle(struct rc_dev *dev)
220
{
221
	unsigned long flags;
222

223
	if (!dev->raw)
224
		return;
225

226 227 228
	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
229 230
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
231

232 233
/* used internally by the sysfs interface */
u64
234
ir_raw_get_allowed_protocols(void)
235
{
236
	return atomic64_read(&available_protocols);
237 238
}

/* Raw clients accept any protocol selection; nothing to program here. */
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

245 246 247 248 249 250 251 252
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	dev->enabled_wakeup_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

253 254 255
/*
 * Used to (un)register raw event clients
 */
256
int ir_raw_event_register(struct rc_dev *dev)
257 258
{
	int rc;
259
	struct ir_raw_handler *handler;
260

261 262
	if (!dev)
		return -EINVAL;
263

264 265 266
	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;
267

268
	dev->raw->dev = dev;
269
	dev->change_protocol = change_protocol;
270
	INIT_KFIFO(dev->raw->kfifo);
271

272 273
	spin_lock_init(&dev->raw->lock);
	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
274
				       "rc%u", dev->minor);
275

276 277 278
	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		goto out;
279 280
	}

281
	mutex_lock(&ir_raw_handler_lock);
282
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
283 284
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
285
			handler->raw_register(dev);
286
	mutex_unlock(&ir_raw_handler_lock);
287

288
	return 0;
289 290 291 292 293

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
294 295
}

/* Tear down the raw client state created by ir_raw_event_register(). */
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	/* stop the decoding thread before unpublishing the client */
	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfree(dev->raw);
	dev->raw = NULL;
}

316 317 318 319 320 321
/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
322 323
	struct ir_raw_event_ctrl *raw;

324
	mutex_lock(&ir_raw_handler_lock);
325
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
326 327
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
328
			ir_raw_handler->raw_register(raw->dev);
329
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
330
	mutex_unlock(&ir_raw_handler_lock);
331

332 333 334 335 336 337
	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
338
	struct ir_raw_event_ctrl *raw;
339
	u64 protocols = ir_raw_handler->protocols;
340

341
	mutex_lock(&ir_raw_handler_lock);
342
	list_del(&ir_raw_handler->list);
343 344 345
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
346
			ir_raw_handler->raw_unregister(raw->dev);
347
	}
348
	atomic64_andnot(protocols, &available_protocols);
349
	mutex_unlock(&ir_raw_handler_lock);
350 351
}
EXPORT_SYMBOL(ir_raw_handler_unregister);