return vpfe_probe_complete(vpfe);
}
+static const struct v4l2_async_notifier_operations vpfe_async_ops = {
+ .bound = vpfe_async_bound,
+ .complete = vpfe_async_complete,
+};
+
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
vpfe->notifier.subdevs = vpfe->cfg->asd;
vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
- vpfe->notifier.bound = vpfe_async_bound;
- vpfe->notifier.complete = vpfe_async_complete;
+ vpfe->notifier.ops = &vpfe_async_ops;
ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
&vpfe->notifier);
if (ret) {
return 0;
}
+static const struct v4l2_async_notifier_operations isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+
static void isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
subdev_entity->notifier.subdevs = &subdev_entity->asd;
subdev_entity->notifier.num_subdevs = 1;
- subdev_entity->notifier.bound = isc_async_bound;
- subdev_entity->notifier.unbind = isc_async_unbind;
- subdev_entity->notifier.complete = isc_async_complete;
+ subdev_entity->notifier.ops = &isc_async_ops;
ret = v4l2_async_notifier_register(&isc->v4l2_dev,
&subdev_entity->notifier);
return 0;
}
+static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
+ .bound = isi_graph_notify_bound,
+ .unbind = isi_graph_notify_unbind,
+ .complete = isi_graph_notify_complete,
+};
+
static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
{
struct device_node *ep = NULL;
isi->notifier.subdevs = subdevs;
isi->notifier.num_subdevs = 1;
- isi->notifier.bound = isi_graph_notify_bound;
- isi->notifier.unbind = isi_graph_notify_unbind;
- isi->notifier.complete = isi_graph_notify_complete;
+ isi->notifier.ops = &isi_graph_notify_ops;
ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev)
{
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
return media_device_register(&fmd->media_dev);
}
+static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (fmd->num_sensors > 0) {
fmd->subdev_notifier.subdevs = fmd->async_subdevs;
fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
- fmd->subdev_notifier.bound = subdev_notifier_bound;
- fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
return media_device_register(&isp->media_dev);
}
+static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .complete = isp_subdev_notifier_complete,
+};
+
/*
* isp_probe - Probe ISP platform device
* @pdev: Pointer to ISP platform device
if (ret < 0)
goto error_register_entities;
- isp->notifier.complete = isp_subdev_notifier_complete;
+ isp->notifier.ops = &isp_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
mutex_unlock(&pcdev->mlock);
}
+static const struct v4l2_async_notifier_operations pxa_camera_sensor_ops = {
+ .bound = pxa_camera_sensor_bound,
+ .unbind = pxa_camera_sensor_unbind,
+};
+
/*
* Driver probe, remove, suspend and resume operations
*/
pcdev->asds[0] = &pcdev->asd;
pcdev->notifier.subdevs = pcdev->asds;
pcdev->notifier.num_subdevs = 1;
- pcdev->notifier.bound = pxa_camera_sensor_bound;
- pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
if (!of_have_populated_dt())
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
return media_device_register(&camss->media_dev);
}
+static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
+ .bound = camss_subdev_notifier_bound,
+ .complete = camss_subdev_notifier_complete,
+};
+
static const struct media_device_ops camss_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
goto err_register_entities;
if (camss->notifier.num_subdevs) {
- camss->notifier.bound = camss_subdev_notifier_bound;
- camss->notifier.complete = camss_subdev_notifier_complete;
+ camss->notifier.ops = &camss_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&camss->v4l2_dev,
&camss->notifier);
return 0;
}
+static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
+ .bound = rvin_digital_notify_bound,
+ .unbind = rvin_digital_notify_unbind,
+ .complete = rvin_digital_notify_complete,
+};
+
static int rvin_digital_parse_v4l2(struct device *dev,
struct v4l2_fwnode_endpoint *vep,
vin_dbg(vin, "Found digital subdevice %pOF\n",
to_of_node(vin->digital->asd.match.fwnode.fwnode));
- vin->notifier.bound = rvin_digital_notify_bound;
- vin->notifier.unbind = rvin_digital_notify_unbind;
- vin->notifier.complete = rvin_digital_notify_complete;
+ vin->notifier.ops = &rvin_digital_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
return ret;
}
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
/* Read endpoint properties */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
struct fwnode_handle *fwnode)
if (ret)
goto error;
- sdr->notifier.bound = rcar_drif_notify_bound;
- sdr->notifier.unbind = rcar_drif_notify_unbind;
- sdr->notifier.complete = rcar_drif_notify_complete;
+ sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
return 0;
}
+static const struct v4l2_async_notifier_operations soc_camera_async_ops = {
+ .bound = soc_camera_async_bound,
+ .unbind = soc_camera_async_unbind,
+ .complete = soc_camera_async_complete,
+};
+
static int scan_async_group(struct soc_camera_host *ici,
struct v4l2_async_subdev **asd, unsigned int size)
{
sasc->notifier.subdevs = asd;
sasc->notifier.num_subdevs = size;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
sasc->notifier.subdevs = &info->subdev;
sasc->notifier.num_subdevs = 1;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
return 0;
}
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
struct device_node *ep = NULL;
dcmi->notifier.subdevs = subdevs;
dcmi->notifier.num_subdevs = 1;
- dcmi->notifier.bound = dcmi_graph_notify_bound;
- dcmi->notifier.unbind = dcmi_graph_notify_unbind;
- dcmi->notifier.complete = dcmi_graph_notify_complete;
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
return 0;
}
+static const struct v4l2_async_notifier_operations cal_async_ops = {
+ .bound = cal_async_bound,
+ .complete = cal_async_complete,
+};
+
static int cal_complete_ctx(struct cal_ctx *ctx)
{
struct video_device *vfd;
ctx->asd_list[0] = asd;
ctx->notifier.subdevs = ctx->asd_list;
ctx->notifier.num_subdevs = 1;
- ctx->notifier.bound = cal_async_bound;
- ctx->notifier.complete = cal_async_complete;
+ ctx->notifier.ops = &cal_async_ops;
ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
&ctx->notifier);
if (ret) {
return -EINVAL;
}
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
struct device_node *node)
{
xdev->notifier.subdevs = subdevs;
xdev->notifier.num_subdevs = num_subdevs;
- xdev->notifier.bound = xvip_graph_notify_bound;
- xdev->notifier.complete = xvip_graph_notify_complete;
+ xdev->notifier.ops = &xvip_graph_notify_ops;
ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
{
int ret;
- if (notifier->bound) {
- ret = notifier->bound(notifier, sd, asd);
+ if (notifier->ops->bound) {
+ ret = notifier->ops->bound(notifier, sd, asd);
if (ret < 0)
return ret;
}
ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
if (ret < 0) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
+ if (notifier->ops->unbind)
+ notifier->ops->unbind(notifier, sd, asd);
return ret;
}
struct v4l2_subdev *sd, *tmp;
list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
-
+ if (notifier->ops->unbind)
+ notifier->ops->unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
list_move(&sd->async_list, &subdev_list);
}
}
- if (list_empty(&notifier->waiting) && notifier->complete) {
- ret = notifier->complete(notifier);
+ if (list_empty(&notifier->waiting) && notifier->ops->complete) {
+ ret = notifier->ops->complete(notifier);
if (ret)
goto err_complete;
}
if (ret)
goto err_unlock;
- if (!list_empty(&notifier->waiting) || !notifier->complete)
+ if (!list_empty(&notifier->waiting) || !notifier->ops->complete)
goto out_unlock;
- ret = notifier->complete(notifier);
+ ret = notifier->ops->complete(notifier);
if (ret)
goto err_cleanup;
return 0;
err_cleanup:
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
-
+ if (notifier->ops->unbind)
+ notifier->ops->unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
err_unlock:
list_add(&sd->asd->list, &notifier->waiting);
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+ if (notifier->ops->unbind)
+ notifier->ops->unbind(notifier, sd, sd->asd);
}
v4l2_async_cleanup(sd);
return media_device_register(&imxmd->md);
}
+static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
/*
* adds controls to a video device from an entity subdevice.
* Continues upstream from the entity's sink pads.
/* prepare the async subdev notifier and register it */
imxmd->subdev_notifier.subdevs = imxmd->async_ptrs;
- imxmd->subdev_notifier.bound = imx_media_subdev_bound;
- imxmd->subdev_notifier.complete = imx_media_probe_complete;
+ imxmd->subdev_notifier.ops = &imx_media_subdev_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->subdev_notifier);
if (ret) {
struct device_node;
struct v4l2_device;
struct v4l2_subdev;
+struct v4l2_async_notifier;
/* A random max subdevice number, used to allocate an array on stack */
#define V4L2_MAX_SUBDEVS 128U
};
/**
+ * struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
+ * @bound: a subdevice driver has successfully probed one of the subdevices
+ * @complete: all subdevices have been probed successfully
+ * @unbind: a subdevice is leaving
+ */
+struct v4l2_async_notifier_operations {
+ int (*bound)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd);
+ int (*complete)(struct v4l2_async_notifier *notifier);
+ void (*unbind)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd);
+};
+
+/**
* struct v4l2_async_notifier - v4l2_device notifier data
*
+ * @ops: notifier operations
* @num_subdevs: number of subdevices used in the subdevs array
* @max_subdevs: number of subdevices allocated in the subdevs array
* @subdevs: array of pointers to subdevice descriptors
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
* @done: list of struct v4l2_subdev, already probed
* @list: member in a global list of notifiers
- * @bound: a subdevice driver has successfully probed one of subdevices
- * @complete: all subdevices have been probed successfully
- * @unbind: a subdevice is leaving
*/
struct v4l2_async_notifier {
+ const struct v4l2_async_notifier_operations *ops;
unsigned int num_subdevs;
unsigned int max_subdevs;
struct v4l2_async_subdev **subdevs;
struct list_head waiting;
struct list_head done;
struct list_head list;
- int (*bound)(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
- int (*complete)(struct v4l2_async_notifier *notifier);
- void (*unbind)(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
};
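For reference, a converted driver now defines a static const operations table and points its notifier at it before registration. A minimal sketch of the resulting pattern, mirroring the conversions above (the foo_* names are hypothetical, not part of this patch):

static int foo_notify_bound(struct v4l2_async_notifier *notifier,
			    struct v4l2_subdev *subdev,
			    struct v4l2_async_subdev *asd)
{
	/* a matching subdevice has probed; wire it into the driver here */
	return 0;
}

static int foo_notify_complete(struct v4l2_async_notifier *notifier)
{
	/* all awaited subdevices are bound; finish device registration */
	return 0;
}

static const struct v4l2_async_notifier_operations foo_notify_ops = {
	.bound = foo_notify_bound,
	.complete = foo_notify_complete,
};

	/* in the driver's probe path: */
	foo->notifier.subdevs = foo->asds;
	foo->notifier.num_subdevs = 1;
	foo->notifier.ops = &foo_notify_ops;
	ret = v4l2_async_notifier_register(&foo->v4l2_dev, &foo->notifier);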
/**