/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

26
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
27
{
28
#if IS_ENABLED(CONFIG_I2C)
29
	struct i2c_client *client = i2c_verify_client(sd->dev);
30 31 32
	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
33 34 35
#else
	return false;
#endif
36 37
}

38 39
static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
40
{
41
	return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
42 43
}

44
static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
45
{
46 47
	return !of_node_cmp(of_node_full_name(sd->of_node),
			    of_node_full_name(asd->match.of.node));
48 49
}

50 51 52 53 54 55 56 57 58 59
static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!is_of_node(sd->fwnode) || !is_of_node(asd->match.fwnode.fwnode))
		return sd->fwnode == asd->match.fwnode.fwnode;

	return !of_node_cmp(of_node_full_name(to_of_node(sd->fwnode)),
			    of_node_full_name(
				    to_of_node(asd->match.fwnode.fwnode)));
}

60 61 62 63 64 65 66
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
67 68
}

/* Subdevices registered before any notifier matched them */
static LIST_HEAD(subdev_list);
/* All registered notifiers; completed notifiers stay on the list too */
static LIST_HEAD(notifier_list);
/* Serialises all access to the two lists above and to notifier state */
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
74
						    struct v4l2_subdev *sd)
75
{
76
	bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
77 78 79 80
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
81 82
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
83
			match = match_custom;
84
			break;
85 86
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
87
			break;
88
		case V4L2_ASYNC_MATCH_I2C:
89 90
			match = match_i2c;
			break;
91 92 93
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
94 95 96
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
97 98 99 100 101 102 103
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
104
		if (match(sd, asd))
105 106 107 108 109 110 111
			return asd;
	}

	return NULL;
}

/*
 * Bind @sd to @notifier for the matched entry @asd: run the notifier's
 * bound() callback, register the subdev with the V4L2 device, move the
 * subdev from the global list to the notifier's done list and, if the
 * waiting list is now empty, invoke complete().
 *
 * Called with list_lock held.  Returns 0 or a negative error code.
 */
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		/* Undo the bound() callback made above */
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/* Every expected subdevice has now been bound */
	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

/*
 * Detach @sd from its V4L2 device and from whichever async list it is
 * on, clearing its async state so it can be registered again later.
 */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

/*
 * Register @notifier on @v4l2_dev: validate and queue all of its async
 * subdev descriptors as "waiting", bind any already-registered subdevs
 * that match, then put the notifier on the global notifier list.
 *
 * Returns 0 on success, -EINVAL on a bad subdev count or match type, or
 * a negative error code propagated from binding a matching subdev.
 */
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	/* Validate every descriptor's match type before queueing it */
	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
		case V4L2_ASYNC_MATCH_FWNODE:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	/* Bind subdevs that registered before this notifier showed up */
	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
213
	struct v4l2_subdev *sd, *tmp;
214 215
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
216
	struct device **dev;
217 218
	int i = 0;

219 220 221
	if (!notifier->v4l2_dev)
		return;

222
	dev = kmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
223 224 225 226 227
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

228 229 230 231
	mutex_lock(&list_lock);

	list_del(&notifier->list);

232
	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
233 234 235
		struct device *d;

		d = get_device(sd->dev);
236

237
		v4l2_async_cleanup(sd);
238 239

		/* If we handled USB devices, we'd have to lock the parent too */
240
		device_release_driver(d);
241 242

		if (notifier->unbind)
243
			notifier->unbind(notifier, sd, sd->asd);
244 245 246 247 248 249 250 251 252

		/*
		 * Store device at the device cache, in order to call
		 * put_device() on the final step
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
253 254 255 256
	}

	mutex_unlock(&list_lock);

257 258 259 260 261 262
	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe to %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
278
	kfree(dev);
279 280 281

	notifier->v4l2_dev = NULL;

282 283 284 285 286 287 288 289 290 291 292
	/*
	 * Don't care about the waiting list, it is initialised and populated
	 * upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

/*
 * Register @sd for async matching: bind it immediately to the first
 * notifier that matches, or park it on the global subdev list to wait
 * for one.  Returns 0 or a negative error from the bind attempt.
 */
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->of_node && sd->dev)
		sd->of_node = sd->dev->of_node;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	/* Try every registered notifier; bind to the first match */
	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
325
	struct v4l2_async_notifier *notifier = sd->notifier;
326

327 328 329
	if (!sd->asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
330 331 332 333 334
		return;
	}

	mutex_lock(&list_lock);

335
	list_add(&sd->asd->list, &notifier->waiting);
336

337
	v4l2_async_cleanup(sd);
338 339

	if (notifier->unbind)
340
		notifier->unbind(notifier, sd, sd->asd);
341 342 343 344

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);