// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
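
/*
 * Overview (summarising the code below): bridge drivers describe the
 * sub-devices they expect with struct v4l2_async_subdev descriptors held in
 * a notifier, sub-device drivers register a struct v4l2_subdev
 * asynchronously, and the two sides are matched here either by I2C
 * adapter/address or by fwnode. When a descriptor matches, the sub-device is
 * bound (the bound() op); once no descriptors are left waiting anywhere in
 * the notifier tree, the root notifier's complete() op is called.
 */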

static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}
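
/*
 * Match an async sub-device descriptor against an I2C sub-device: both the
 * adapter number and the client address must match. Always fails when the
 * kernel is built without I2C support.
 */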
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
	       asd->match.i2c.adapter_id == client->adapter->nr &&
	       asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}
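
/*
 * Match an async sub-device descriptor against a sub-device by fwnode: first
 * try an exact fwnode (or secondary fwnode) match, then fall back to a mixed
 * match between an endpoint node on one side and its parent device node on
 * the other, warning that such a match may be ambiguous.
 */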
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Check the same situation for any possible secondary assigned to the
	 * subdev's fwnode.
	 */
	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
	    sd->fwnode->secondary == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the
	 * side that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}
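
/*
 * Global state: sub-devices not yet bound to any notifier sit on subdev_list,
 * all registered notifiers sit on notifier_list, and list_lock protects both
 * lists as well as the notifiers' waiting and done lists.
 */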
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
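
/*
 * Return the first descriptor on the notifier's waiting list that matches
 * @sd, or NULL if none does.
 */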
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);
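
/*
 * Bind a matched sub-device: register it with the v4l2_device, call the
 * notifier's bound() op, take the descriptor off the waiting list, move the
 * sub-device to the notifier's done list, and finally try to match the
 * sub-device's own notifier, if it has registered one.
 */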
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}
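
/*
 * Typical bridge-driver usage of the notifier API in this file (an
 * illustrative sketch only; names such as "priv", "ep" and "my_notifier_ops"
 * are made up):
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *	asd = __v4l2_async_notifier_add_fwnode_remote_subdev(&priv->notifier,
 *							      ep, sizeof(*asd));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *	priv->notifier.ops = &my_notifier_ops;
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *
 * On failure or removal the driver is expected to call
 * v4l2_async_notifier_unregister() and v4l2_async_notifier_cleanup().
 */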
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
					struct fwnode_handle *fwnode,
					unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					       struct fwnode_handle *endpoint,
					       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
						      asd_struct_size);
	/*
	 * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_port_parent.
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);

struct v4l2_async_subdev *
__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				     int adapter_id, unsigned short address,
				     unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);
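
/*
 * Sub-device side of the framework: v4l2_async_register_subdev() is normally
 * called at the end of a sub-device driver's probe. It either binds the
 * sub-device to an already registered notifier with a matching descriptor,
 * or parks it on the global subdev_list until such a notifier appears.
 */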
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
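
/*
 * debugfs support: the file created in v4l2_async_init() below,
 * <debugfs>/v4l2-async/pending_async_subdevices, lists for every registered
 * notifier the descriptors that are still waiting for a matching sub-device.
 */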
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

static const char *
v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
MODULE_LICENSE("GPL");