/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

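/*
 * Each subscription (struct v4l2_subscribed_event) owns a fixed-size ring
 * of sev->elems kevents: sev->first indexes the oldest queued event and
 * sev->in_use counts how many slots are filled.  sev_pos() maps a logical
 * offset onto an array index, wrapping around the end of the ring.
 */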
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

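/*
 * Pop the oldest available event off the file handle's queue and copy it
 * into *event.  Takes fh->vdev->fh_lock itself; returns -ENOENT when no
 * event is pending.
 */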
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

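/*
 * Queue an event on every file handle open on the device.  fh_lock is
 * taken with interrupts disabled, so this is callable from interrupt as
 * well as process context.  A minimal usage sketch (the event type here is
 * just an example):
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */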
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

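/*
 * Number of events ready for dequeuing; the v4l2 core typically consults
 * this from its poll() handler to report exceptional (EPOLLPRI) readiness.
 */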
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

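/*
 * Detach a subscription and discard its queued events.  The caller must
 * hold both fh->subscribe_lock and fh->vdev->fh_lock, as the assertions
 * below document.
 */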
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

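/*
 * Subscribe a file handle to an event type.  At least one ring slot is
 * always allocated, duplicate subscriptions are silently accepted, and
 * ops->add (if any) can veto the subscription.  A driver handling a custom
 * event in its VIDIOC_SUBSCRIBE_EVENT ioctl might call it like this
 * (sketch, with a hypothetical slot count):
 *
 *	return v4l2_event_subscribe(fh, sub, 4, NULL);
 */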
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

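/*
 * Drop every remaining subscription on the file handle.  Each iteration
 * peeks the first entry under fh_lock, then releases the lock and goes
 * through the regular v4l2_event_unsubscribe() path so that ops->del is
 * invoked properly.
 */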
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

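/*
 * Unsubscribe from one event type, or from everything when the caller
 * passes V4L2_EVENT_ALL.  ops->del is invoked only after the subscription
 * has been unlinked, outside the fh_lock spinlock.
 */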
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

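/*
 * Replace/merge callbacks for V4L2_EVENT_SOURCE_CHANGE: when events are
 * coalesced, the "changes" bitmasks are ORed together so that no change
 * notification is lost even if the payload itself is overwritten.
 */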
static void v4l2_event_src_replace(struct v4l2_event *old,
				const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);