/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include "rc-core-priv.h"

/* Define the max number of pulse/space transitions to buffer */
#define MAX_IR_EVENT_SIZE      512

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;

/* Fetch raw events from the kfifo and feed them to the registered decoders */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			if (raw->dev->enabled_protocols & handler->protocols ||
			    !handler->protocols)
				handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
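
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware reports complete mark/space durations could feed them to the
 * raw decoders roughly like this. The "foo" names are hypothetical;
 * rc-core expects durations in nanoseconds.
 *
 *	static void foo_push_sample(struct foo_ir *foo, bool pulse, u32 us)
 *	{
 *		DEFINE_IR_RAW_EVENT(ev);
 *
 *		ev.pulse = pulse;
 *		ev.duration = US_TO_NS(us);
 *		ir_raw_event_store(foo->rc, &ev);
 *		ir_raw_event_handle(foo->rc);
 *	}
 */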

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since the last event, or whether we're
	 * being called for the first time; note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
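
/*
 * Illustrative sketch (not part of the original file): hardware that only
 * interrupts on level changes would typically call ir_raw_event_store_edge()
 * from its interrupt handler, passing IR_PULSE when a mark begins and
 * IR_SPACE when a space begins, and let rc-core work out the durations.
 * The "foo" names and foo_line_is_active() are hypothetical.
 *
 *	static irqreturn_t foo_ir_isr(int irq, void *data)
 *	{
 *		struct foo_ir *foo = data;
 *
 *		ir_raw_event_store_edge(foo->rc, foo_line_is_active(foo) ?
 *					IR_PULSE : IR_SPACE);
 *		ir_raw_event_handle(foo->rc);
 *		return IRQ_HANDLED;
 *	}
 */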

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge().
 * It is intended for devices with a limited internal buffer: it automerges
 * consecutive samples of the same type and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
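
/*
 * Illustrative sketch (not part of the original file): a driver draining a
 * small hardware FIFO can push each sample through the filtered helper,
 * which merges consecutive samples of the same polarity and enters idle
 * mode once dev->timeout worth of space has accumulated. The "foo" names
 * are hypothetical.
 *
 *	while (foo_fifo_not_empty(foo)) {
 *		DEFINE_IR_RAW_EVENT(ev);
 *		u32 sample = foo_read_sample(foo);
 *
 *		ev.pulse = foo_sample_is_mark(sample);
 *		ev.duration = US_TO_NS(foo_sample_len_us(sample));
 *		ir_raw_event_store_with_filter(foo->rc, &ev);
 *	}
 *	ir_raw_event_handle(foo->rc);
 */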

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	unsigned long flags;

	if (!dev->raw)
		return;

	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	u64 protocols;
	mutex_lock(&ir_raw_handler_lock);
	protocols = available_protocols;
	mutex_unlock(&ir_raw_handler_lock);
	return protocols;
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	dev->enabled_wakeup_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	rc = kfifo_alloc(&dev->raw->kfifo,
			 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
			 GFP_KERNEL);
	if (rc < 0)
		goto out;

	spin_lock_init(&dev->raw->lock);
	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
				       "rc%u", dev->minor);

	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		goto out;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	available_protocols |= ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	available_protocols &= ~protocols;
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
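
/*
 * Illustrative sketch (not part of the original file): a raw protocol
 * decoder module normally fills in a struct ir_raw_handler and registers
 * it from its module_init(). The "foo" names and RC_BIT_FOO are
 * hypothetical; real decoders use one of the existing RC_BIT_* masks.
 *
 *	static struct ir_raw_handler foo_handler = {
 *		.protocols	= RC_BIT_FOO,
 *		.decode		= ir_foo_decode,
 *	};
 *
 *	static int __init ir_foo_decode_init(void)
 *	{
 *		ir_raw_handler_register(&foo_handler);
 *		return 0;
 *	}
 *
 *	static void __exit ir_foo_decode_exit(void)
 *	{
 *		ir_raw_handler_unregister(&foo_handler);
 *	}
 *
 *	module_init(ir_foo_decode_init);
 *	module_exit(ir_foo_decode_exit);
 */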