/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
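
/*
 * Overview of the mechanism implemented below: sub-device drivers call
 * v4l2_async_register_subdev() when they probe, and bridge drivers register
 * a notifier carrying an array of match descriptors.  Whenever a sub-device
 * matches a descriptor on a notifier's waiting list, it is registered with
 * the bridge's struct v4l2_device and the notifier's bound() callback runs;
 * once the waiting list is empty, complete() is called.
 */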

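/*
 * Match a sub-device against an I2C match descriptor: the sub-device must be
 * an I2C client on the expected adapter with the expected address.  Never
 * matches when I2C support is disabled.
 */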
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);
	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

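/* Match a sub-device by the name of its underlying struct device. */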
static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}

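/* Match a sub-device by the full path of its device tree node. */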
static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return !of_node_cmp(of_node_full_name(sd->of_node),
			    of_node_full_name(asd->match.of.node));
}

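/*
 * Match using the caller-supplied callback; a missing callback matches any
 * sub-device.
 */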
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

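/*
 * Global list of sub-devices not yet claimed by a notifier, the list of all
 * registered notifiers, and the lock protecting both.
 */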
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

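/*
 * Return the descriptor on the notifier's waiting list that matches the
 * given sub-device, or NULL if none does.
 */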
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

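/*
 * Bind a matched sub-device: run the notifier's bound() callback, register
 * the sub-device with the V4L2 device, move it from the waiting list to the
 * notifier's done list, and call complete() once nothing is left waiting.
 */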
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

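/*
 * Unregister the sub-device from its V4L2 device and take it off the async
 * lists.
 */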
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

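/*
 * Register a notifier that waits for the async sub-devices described by
 * notifier->subdevs[0..num_subdevs-1].  Sub-devices already sitting on the
 * global list are bound immediately; the rest are bound as their drivers
 * probe.
 *
 * Minimal usage sketch from a bridge driver (names below are hypothetical,
 * not part of this API):
 *
 *	static struct v4l2_async_subdev sensor_asd = {
 *		.match_type = V4L2_ASYNC_MATCH_I2C,
 *		.match.i2c = {
 *			.adapter_id = 1,
 *			.address = 0x10,
 *		},
 *	};
 *	static struct v4l2_async_subdev *my_asds[] = { &sensor_asd };
 *
 *	notifier->subdevs = my_asds;
 *	notifier->num_subdevs = ARRAY_SIZE(my_asds);
 *	notifier->bound = my_bound;
 *	notifier->complete = my_complete;
 *	ret = v4l2_async_notifier_register(v4l2_dev, notifier);
 */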
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

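/*
 * Unregister a notifier: unbind every sub-device it has bound, release the
 * sub-device drivers and ask the driver core to re-probe them, so that the
 * sub-devices can register themselves again later.
 */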
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_subdev *asd = sd->asd;
		struct device *d;

		d = get_device(sd->dev);

		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		/*
		 * v4l2_async_cleanup() has cleared sd->asd, so pass the
		 * descriptor cached above to the unbind callback.
		 */
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);

		/*
		 * Store device at the device cache, in order to call
		 * put_device() on the final step
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe to %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * Don't care about the waiting list, it is initialised and populated
	 * upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

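/*
 * Called by sub-device drivers on probe.  If a registered notifier is already
 * waiting for this sub-device it is bound right away; otherwise it is parked
 * on the global subdev_list until a notifier claims it.
 */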
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->of_node && sd->dev)
		sd->of_node = sd->dev->of_node;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

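/*
 * Called by sub-device drivers on removal.  Puts the match descriptor back on
 * the owning notifier's waiting list and runs the notifier's unbind callback.
 */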
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;
	struct v4l2_async_subdev *asd = sd->asd;

	if (!asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	list_add(&asd->list, &notifier->waiting);

	v4l2_async_cleanup(sd);

	/*
	 * v4l2_async_cleanup() has cleared sd->asd, so pass the descriptor
	 * cached above to the unbind callback.
	 */
	if (notifier->unbind)
		notifier->unbind(notifier, sd, asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);