2013-01-08 18:06:31 +08:00
|
|
|
/*
|
|
|
|
* V4L2 asynchronous subdevice registration API
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/i2c.h>
|
|
|
|
#include <linux/list.h>
|
[media] v4l2-core: Use kvmalloc() for potentially big allocations
There are multiple places where arrays or otherwise variable sized
buffer are allocated through V4L2 core code, including things like
controls, memory pages, staging buffers for ioctls and so on. Such
allocations can potentially require an order > 0 allocation from the
page allocator, which is not guaranteed to be fulfilled and is likely to
fail on a system with severe memory fragmentation (e.g. a system with
very long uptime).
Since the memory being allocated is intended to be used by the CPU
exclusively, we can consider using vmalloc() as a fallback and this is
exactly what the recently merged kvmalloc() helpers do. A kmalloc() call
is still attempted, even for order > 0 allocations, but it is done
with __GFP_NORETRY and __GFP_NOWARN, with expectation of failing if
requested memory is not available instantly. Only then the vmalloc()
fallback is used. This should give us fast and more reliable allocations
even on systems with higher memory pressure and/or more fragmentation,
while still retaining the same performance level on systems not
suffering from such conditions.
While at it, replace explicit array size calculations on changed
allocations with kvmalloc_array().
Purposedly not touching videobuf1, as it is deprecated, has only few
users remaining and would rather be seen removed instead.
Signed-off-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
2017-06-19 11:53:43 +08:00
|
|
|
#include <linux/mm.h>
|
2013-01-08 18:06:31 +08:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/mutex.h>
|
2016-08-16 17:54:59 +08:00
|
|
|
#include <linux/of.h>
|
2013-01-08 18:06:31 +08:00
|
|
|
#include <linux/platform_device.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
|
|
#include <media/v4l2-async.h>
|
|
|
|
#include <media/v4l2-device.h>
|
2017-08-17 23:28:21 +08:00
|
|
|
#include <media/v4l2-fwnode.h>
|
2013-01-08 18:06:31 +08:00
|
|
|
#include <media/v4l2-subdev.h>
|
|
|
|
|
2017-09-01 20:27:32 +08:00
|
|
|
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
|
|
|
|
struct v4l2_subdev *subdev,
|
|
|
|
struct v4l2_async_subdev *asd)
|
|
|
|
{
|
|
|
|
if (!n->ops || !n->ops->bound)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return n->ops->bound(n, subdev, asd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
|
|
|
|
struct v4l2_subdev *subdev,
|
|
|
|
struct v4l2_async_subdev *asd)
|
|
|
|
{
|
|
|
|
if (!n->ops || !n->ops->unbind)
|
|
|
|
return;
|
|
|
|
|
|
|
|
n->ops->unbind(n, subdev, asd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
|
|
|
|
{
|
|
|
|
if (!n->ops || !n->ops->complete)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return n->ops->complete(n);
|
|
|
|
}
|
|
|
|
|
2015-06-12 03:18:01 +08:00
|
|
|
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
|
2013-01-08 18:06:31 +08:00
|
|
|
{
|
2013-06-24 16:13:51 +08:00
|
|
|
#if IS_ENABLED(CONFIG_I2C)
|
2015-06-12 03:18:01 +08:00
|
|
|
struct i2c_client *client = i2c_verify_client(sd->dev);
|
2013-01-08 18:06:31 +08:00
|
|
|
return client &&
|
|
|
|
asd->match.i2c.adapter_id == client->adapter->nr &&
|
|
|
|
asd->match.i2c.address == client->addr;
|
2013-06-24 16:13:51 +08:00
|
|
|
#else
|
|
|
|
return false;
|
|
|
|
#endif
|
2013-01-08 18:06:31 +08:00
|
|
|
}
|
|
|
|
|
2015-06-12 03:18:01 +08:00
|
|
|
static bool match_devname(struct v4l2_subdev *sd,
|
|
|
|
struct v4l2_async_subdev *asd)
|
2013-01-08 18:06:31 +08:00
|
|
|
{
|
2015-06-12 03:18:01 +08:00
|
|
|
return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
|
2013-01-08 18:06:31 +08:00
|
|
|
}
|
|
|
|
|
2016-08-16 17:54:59 +08:00
|
|
|
/* Match a sub-device against an async descriptor by firmware node pointer. */
static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->fwnode == asd->match.fwnode.fwnode;
}
|
|
|
|
|
2015-06-12 03:18:01 +08:00
|
|
|
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
|
|
|
|
{
|
|
|
|
if (!asd->match.custom.match)
|
|
|
|
/* Match always */
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return asd->match.custom.match(sd->dev, asd);
|
2013-07-19 23:21:29 +08:00
|
|
|
}
|
|
|
|
|
2013-01-08 18:06:31 +08:00
|
|
|
/*
 * Global state: the list of registered-but-unmatched sub-devices, the list
 * of registered notifiers, and the mutex serialising access to both.
 */
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
|
|
|
|
|
2017-09-05 00:44:39 +08:00
|
|
|
static struct v4l2_async_subdev *v4l2_async_find_match(
|
|
|
|
struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
|
2013-01-08 18:06:31 +08:00
|
|
|
{
|
2015-06-12 03:18:01 +08:00
|
|
|
bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
|
2013-01-08 18:06:31 +08:00
|
|
|
struct v4l2_async_subdev *asd;
|
|
|
|
|
|
|
|
list_for_each_entry(asd, ¬ifier->waiting, list) {
|
|
|
|
/* bus_type has been verified valid before */
|
2013-07-19 23:14:46 +08:00
|
|
|
switch (asd->match_type) {
|
|
|
|
case V4L2_ASYNC_MATCH_CUSTOM:
|
2015-06-12 03:18:01 +08:00
|
|
|
match = match_custom;
|
2013-01-08 18:06:31 +08:00
|
|
|
break;
|
2013-07-19 23:14:46 +08:00
|
|
|
case V4L2_ASYNC_MATCH_DEVNAME:
|
|
|
|
match = match_devname;
|
2013-01-08 18:06:31 +08:00
|
|
|
break;
|
2013-07-19 23:14:46 +08:00
|
|
|
case V4L2_ASYNC_MATCH_I2C:
|
2013-01-08 18:06:31 +08:00
|
|
|
match = match_i2c;
|
|
|
|
break;
|
2016-08-16 17:54:59 +08:00
|
|
|
case V4L2_ASYNC_MATCH_FWNODE:
|
|
|
|
match = match_fwnode;
|
|
|
|
break;
|
2013-01-08 18:06:31 +08:00
|
|
|
default:
|
|
|
|
/* Cannot happen, unless someone breaks us */
|
|
|
|
WARN_ON(true);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* match cannot be NULL here */
|
2015-06-12 03:18:01 +08:00
|
|
|
if (match(sd, asd))
|
2013-01-08 18:06:31 +08:00
|
|
|
return asd;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
/* Find the sub-device notifier registered by a sub-device driver. */
|
|
|
|
static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
|
|
|
|
struct v4l2_subdev *sd)
|
|
|
|
{
|
|
|
|
struct v4l2_async_notifier *n;
|
|
|
|
|
|
|
|
list_for_each_entry(n, ¬ifier_list, list)
|
|
|
|
if (n->sd == sd)
|
|
|
|
return n;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get v4l2_device related to the notifier if one can be found. */
|
|
|
|
static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
|
|
|
while (notifier->parent)
|
|
|
|
notifier = notifier->parent;
|
|
|
|
|
|
|
|
return notifier->v4l2_dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if all child sub-device notifiers are complete, false otherwise.
|
|
|
|
*/
|
|
|
|
static bool v4l2_async_notifier_can_complete(
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
|
|
|
struct v4l2_subdev *sd;
|
|
|
|
|
|
|
|
if (!list_empty(¬ifier->waiting))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
list_for_each_entry(sd, ¬ifier->done, async_list) {
|
|
|
|
struct v4l2_async_notifier *subdev_notifier =
|
|
|
|
v4l2_async_find_subdev_notifier(sd);
|
|
|
|
|
|
|
|
if (subdev_notifier &&
|
|
|
|
!v4l2_async_notifier_can_complete(subdev_notifier))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Complete the master notifier if possible. This is done when all async
|
|
|
|
* sub-devices have been bound; v4l2_device is also available then.
|
|
|
|
*/
|
|
|
|
static int v4l2_async_notifier_try_complete(
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
|
|
|
/* Quick check whether there are still more sub-devices here. */
|
|
|
|
if (!list_empty(¬ifier->waiting))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Check the entire notifier tree; find the root notifier first. */
|
|
|
|
while (notifier->parent)
|
|
|
|
notifier = notifier->parent;
|
|
|
|
|
|
|
|
/* This is root if it has v4l2_dev. */
|
|
|
|
if (!notifier->v4l2_dev)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Is everything ready? */
|
|
|
|
if (!v4l2_async_notifier_can_complete(notifier))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return v4l2_async_notifier_call_complete(notifier);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int v4l2_async_notifier_try_all_subdevs(
|
|
|
|
struct v4l2_async_notifier *notifier);
|
|
|
|
|
2017-09-05 00:44:39 +08:00
|
|
|
/*
 * Bind a matched sub-device: register @sd with @v4l2_dev, run the
 * notifier's bound callback, and move the pair off the waiting/global lists
 * onto the notifier's done list. If @sd has registered a notifier of its
 * own, adopt it as a child and try matching its async sub-devices too.
 *
 * Returns 0 on success or a negative error code; on a bound-callback
 * failure the subdev registration is rolled back and no lists are touched.
 */
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		/* Undo the registration so a failing caller can free sd. */
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 * A notifier that already has a parent was adopted earlier.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}
|
|
|
|
|
2017-09-25 08:48:08 +08:00
|
|
|
/* Test all async sub-devices in a notifier for a match. */
static int v4l2_async_notifier_try_all_subdevs(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	/* No v4l2_dev anywhere up the tree: nothing can be bound yet. */
	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}
|
|
|
|
|
2013-07-22 19:01:33 +08:00
|
|
|
/*
 * Detach @sd from its v4l2_device and from whichever async list it is on,
 * clearing its link to the async descriptor.
 */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}
|
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
/* Unbind all sub-devices in the notifier tree. */
static void v4l2_async_notifier_unbind_all_subdevs(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	/* _safe: v4l2_async_cleanup()/list_move() unlink entries as we go. */
	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		/* Tear down any child notifier first, depth-first. */
		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		/* Put the sub-device back where hot-plugging can find it. */
		list_move(&sd->async_list, &subdev_list);
	}

	/* Detach from the parent; it may be re-adopted on a later match. */
	notifier->parent = NULL;
}
|
|
|
|
|
2017-09-20 15:51:54 +08:00
|
|
|
/* See if an fwnode can be found in a notifier's lists. */
|
|
|
|
static bool __v4l2_async_notifier_fwnode_has_async_subdev(
|
|
|
|
struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode)
|
|
|
|
{
|
|
|
|
struct v4l2_async_subdev *asd;
|
|
|
|
struct v4l2_subdev *sd;
|
|
|
|
|
|
|
|
list_for_each_entry(asd, ¬ifier->waiting, list) {
|
|
|
|
if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (asd->match.fwnode.fwnode == fwnode)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_for_each_entry(sd, ¬ifier->done, async_list) {
|
|
|
|
if (WARN_ON(!sd->asd))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (sd->asd->match.fwnode.fwnode == fwnode)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find out whether an async sub-device was set up for an fwnode already or
|
|
|
|
* whether it exists in a given notifier before @this_index.
|
|
|
|
*/
|
|
|
|
static bool v4l2_async_notifier_fwnode_has_async_subdev(
|
|
|
|
struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode,
|
|
|
|
unsigned int this_index)
|
|
|
|
{
|
|
|
|
unsigned int j;
|
|
|
|
|
|
|
|
lockdep_assert_held(&list_lock);
|
|
|
|
|
|
|
|
/* Check that an fwnode is not being added more than once. */
|
|
|
|
for (j = 0; j < this_index; j++) {
|
|
|
|
struct v4l2_async_subdev *asd = notifier->subdevs[this_index];
|
|
|
|
struct v4l2_async_subdev *other_asd = notifier->subdevs[j];
|
|
|
|
|
|
|
|
if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE &&
|
|
|
|
asd->match.fwnode.fwnode ==
|
|
|
|
other_asd->match.fwnode.fwnode)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check than an fwnode did not exist in other notifiers. */
|
|
|
|
list_for_each_entry(notifier, ¬ifier_list, list)
|
|
|
|
if (__v4l2_async_notifier_fwnode_has_async_subdev(
|
|
|
|
notifier, fwnode))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-09-25 08:48:08 +08:00
|
|
|
/*
 * Common notifier registration: validate the subdevs array, queue every
 * descriptor on the waiting list, then attempt to match and possibly
 * complete immediately. Called with notifier->v4l2_dev or notifier->sd
 * already set by the public wrappers.
 */
static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	/* dev is only used for error reporting; may be NULL for subdev notifiers. */
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
	struct v4l2_async_subdev *asd;
	int ret;
	int i;

	if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			/* Reject fwnodes registered twice anywhere. */
			if (v4l2_async_notifier_fwnode_has_async_subdev(
				    notifier, asd->match.fwnode.fwnode, i)) {
				dev_err(dev,
					"fwnode has already been registered or in notifier's subdev list\n");
				ret = -EEXIST;
				goto err_unlock;
			}
			break;
		default:
			dev_err(dev, "Invalid match type %u on %p\n",
				asd->match_type, asd);
			ret = -EINVAL;
			goto err_unlock;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	/* Try to bind any sub-devices that registered before us. */
	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}
|
2017-09-25 08:48:08 +08:00
|
|
|
|
|
|
|
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
if (WARN_ON(!v4l2_dev || notifier->sd))
|
2017-09-25 08:48:08 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
notifier->v4l2_dev = v4l2_dev;
|
|
|
|
|
|
|
|
ret = __v4l2_async_notifier_register(notifier);
|
|
|
|
if (ret)
|
|
|
|
notifier->v4l2_dev = NULL;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2013-01-08 18:06:31 +08:00
|
|
|
EXPORT_SYMBOL(v4l2_async_notifier_register);
|
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
|
|
|
|
struct v4l2_async_notifier *notifier)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (WARN_ON(!sd || notifier->v4l2_dev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
notifier->sd = sd;
|
|
|
|
|
|
|
|
ret = __v4l2_async_notifier_register(notifier);
|
|
|
|
if (ret)
|
|
|
|
notifier->sd = NULL;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
|
|
|
|
|
2013-01-08 18:06:31 +08:00
|
|
|
/*
 * Unregister a notifier: unbind every sub-device bound through it (and
 * through its children), clear its ownership fields and drop it from the
 * global notifier list. Safe to call on a never-registered notifier.
 */
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	/* Neither owner set means the notifier was never registered. */
	if (!notifier->v4l2_dev && !notifier->sd)
		return;

	mutex_lock(&list_lock);

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);

	mutex_unlock(&list_lock);
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_notifier_unregister);
|
|
|
|
|
2017-08-17 23:28:21 +08:00
|
|
|
/*
 * Release the notifier's dynamically allocated subdevs array and the
 * resources held by each descriptor. Only acts on arrays allocated by the
 * framework (max_subdevs non-zero); driver-owned arrays are left alone.
 */
void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	unsigned int i;

	/* max_subdevs is only set when the framework allocated the array. */
	if (!notifier->max_subdevs)
		return;

	for (i = 0; i < notifier->num_subdevs; i++) {
		struct v4l2_async_subdev *asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			/* Drop the reference taken when the asd was added. */
			fwnode_handle_put(asd->match.fwnode.fwnode);
			break;
		default:
			/*
			 * NOTE(review): only FWNODE descriptors appear to be
			 * expected in framework-allocated arrays; anything
			 * else warns once — confirm against the add helpers.
			 */
			WARN_ON_ONCE(true);
			break;
		}

		kfree(asd);
	}

	notifier->max_subdevs = 0;
	notifier->num_subdevs = 0;

	kvfree(notifier->subdevs);
	notifier->subdevs = NULL;
}
|
|
|
|
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
|
|
|
|
|
2013-01-08 18:06:31 +08:00
|
|
|
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
|
|
|
|
{
|
2017-09-25 08:54:31 +08:00
|
|
|
struct v4l2_async_notifier *subdev_notifier;
|
2013-01-08 18:06:31 +08:00
|
|
|
struct v4l2_async_notifier *notifier;
|
2017-10-02 18:24:54 +08:00
|
|
|
int ret;
|
2013-01-08 18:06:31 +08:00
|
|
|
|
2015-06-12 03:18:01 +08:00
|
|
|
/*
|
|
|
|
* No reference taken. The reference is held by the device
|
|
|
|
* (struct v4l2_subdev.dev), and async sub-device does not
|
|
|
|
* exist independently of the device at any point of time.
|
|
|
|
*/
|
2016-08-27 07:17:25 +08:00
|
|
|
if (!sd->fwnode && sd->dev)
|
|
|
|
sd->fwnode = dev_fwnode(sd->dev);
|
2015-06-12 03:18:01 +08:00
|
|
|
|
2013-01-08 18:06:31 +08:00
|
|
|
mutex_lock(&list_lock);
|
|
|
|
|
2013-07-22 19:01:33 +08:00
|
|
|
INIT_LIST_HEAD(&sd->async_list);
|
2013-01-08 18:06:31 +08:00
|
|
|
|
|
|
|
list_for_each_entry(notifier, ¬ifier_list, list) {
|
2017-09-25 08:54:31 +08:00
|
|
|
struct v4l2_device *v4l2_dev =
|
|
|
|
v4l2_async_notifier_find_v4l2_dev(notifier);
|
|
|
|
struct v4l2_async_subdev *asd;
|
2017-10-02 18:24:54 +08:00
|
|
|
int ret;
|
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
if (!v4l2_dev)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
asd = v4l2_async_find_match(notifier, sd);
|
2017-10-02 18:24:54 +08:00
|
|
|
if (!asd)
|
|
|
|
continue;
|
|
|
|
|
2017-09-25 08:48:08 +08:00
|
|
|
ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd,
|
|
|
|
asd);
|
2017-10-02 18:24:54 +08:00
|
|
|
if (ret)
|
2017-09-25 08:54:31 +08:00
|
|
|
goto err_unbind;
|
2017-10-02 18:24:54 +08:00
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
ret = v4l2_async_notifier_try_complete(notifier);
|
2017-10-02 18:24:54 +08:00
|
|
|
if (ret)
|
2017-09-25 08:54:31 +08:00
|
|
|
goto err_unbind;
|
2017-10-02 18:24:54 +08:00
|
|
|
|
|
|
|
goto out_unlock;
|
2013-01-08 18:06:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* None matched, wait for hot-plugging */
|
2013-07-22 19:01:33 +08:00
|
|
|
list_add(&sd->async_list, &subdev_list);
|
2013-01-08 18:06:31 +08:00
|
|
|
|
2017-10-02 18:24:54 +08:00
|
|
|
out_unlock:
|
2013-01-08 18:06:31 +08:00
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
|
|
|
|
return 0;
|
2017-10-02 18:24:54 +08:00
|
|
|
|
2017-09-25 08:54:31 +08:00
|
|
|
err_unbind:
|
|
|
|
/*
|
|
|
|
* Complete failed. Unbind the sub-devices bound through registering
|
|
|
|
* this async sub-device.
|
|
|
|
*/
|
|
|
|
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
|
|
|
|
if (subdev_notifier)
|
|
|
|
v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
|
|
|
|
|
|
|
|
if (sd->asd)
|
|
|
|
v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
|
2017-10-02 18:24:54 +08:00
|
|
|
v4l2_async_cleanup(sd);
|
|
|
|
|
|
|
|
mutex_unlock(&list_lock);
|
|
|
|
|
|
|
|
return ret;
|
2013-01-08 18:06:31 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_register_subdev);
|
|
|
|
|
|
|
|
/*
 * Unregister an async sub-device: if it was bound to a notifier, return its
 * descriptor to that notifier's waiting list and notify of the unbind, then
 * detach the sub-device from the v4l2 core entirely.
 */
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		/* Make the descriptor matchable again before unbinding. */
		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
|
|
|
|
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
|