/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

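/*
 * Match helpers: each one checks whether an already-probed sub-device
 * corresponds to a given struct v4l2_async_subdev descriptor, using the
 * bus-specific information stored in asd->match (I2C adapter number and
 * address, device name, OF node, or a driver-supplied callback).
 */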
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);
	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}

static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->of_node == asd->match.of.node;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

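/*
 * Sub-devices that have registered themselves but have not been claimed by
 * a notifier yet sit on subdev_list; all registered notifiers, including
 * completed ones, sit on notifier_list. Both lists are protected by
 * list_lock.
 */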
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

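/*
 * Walk the notifier's waiting list and return the first async sub-device
 * descriptor that matches the given sub-device, or NULL if none matches.
 */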
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

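/*
 * A sub-device has matched one of the notifier's descriptors: bind it to
 * the notifier, register it with the V4L2 device and, once the waiting
 * list is empty, invoke the notifier's .complete() callback.
 */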
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}
	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

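/*
 * Unregister the sub-device from its V4L2 device and detach it from
 * whichever async list it is currently on; the caller decides where the
 * sub-device goes next.
 */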
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

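/*
 * Called by bridge drivers: validate every descriptor in
 * notifier->subdevs[], queue them on the waiting list and immediately bind
 * any sub-device that is already sitting on the global list.
 *
 * A rough usage sketch (all names below are illustrative, not part of this
 * file):
 *
 *	static struct v4l2_async_subdev sensor_asd = {
 *		.match_type = V4L2_ASYNC_MATCH_I2C,
 *		.match.i2c.adapter_id = 1,
 *		.match.i2c.address = 0x36,
 *	};
 *	static struct v4l2_async_subdev *my_asds[] = { &sensor_asd };
 *
 *	notifier.subdevs = my_asds;
 *	notifier.num_subdevs = ARRAY_SIZE(my_asds);
 *	notifier.bound = my_bound;
 *	notifier.complete = my_complete;
 *	ret = v4l2_async_notifier_register(&bridge->v4l2_dev, &notifier);
 */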
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	/* Keep completed notifiers on the list as well */
	list_add(&notifier->list, &notifier_list);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

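/*
 * Tear a notifier down: unbind every sub-device on its done list, release
 * the sub-device's driver and ask the driver core to re-probe it, so the
 * sub-device can register itself again later on.
 */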
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct device *d;

		d = get_device(sd->dev);

		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		if (notifier->unbind)
			notifier->unbind(notifier, sd, sd->asd);

		/*
		 * Store the device in the device cache so that put_device()
		 * can be called on it in the final step below
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe to %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * The waiting list needs no cleanup here: it is initialised and
	 * populated anew upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

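/*
 * Called by sub-device drivers, typically at probe time. If a registered
 * notifier is already waiting for this sub-device it is bound right away,
 * otherwise it is parked on the global list until a matching notifier
 * appears. A minimal sketch for an I2C sensor driver (names illustrative):
 *
 *	v4l2_i2c_subdev_init(&state->sd, client, &sensor_subdev_ops);
 *	ret = v4l2_async_register_subdev(&state->sd);
 */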
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference is taken here. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not
	 * exist independently of the device at any point in time.
	 */
	if (!sd->of_node && sd->dev)
		sd->of_node = sd->dev->of_node;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

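/*
 * Called by sub-device drivers on removal: put the async descriptor back on
 * its notifier's waiting list, clean the sub-device up and let the notifier
 * know through its .unbind() callback.
 */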
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;

	if (!sd->asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	list_add(&sd->asd->list, &notifier->waiting);

	v4l2_async_cleanup(sd);

	if (notifier->unbind)
		notifier->unbind(notifier, sd, sd->asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);