--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ ... @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
{
struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
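+
+ /* Serialise against cio2_subdev_set_fmt(); both sides touch q->subdev_fmt */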
+ mutex_lock(&q->subdev_lock);
+
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
else
fmt->format = q->subdev_fmt;
+ mutex_unlock(&q->subdev_lock);
+
return 0;
}
@@ ... @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
if (fmt->pad == CIO2_PAD_SOURCE)
return cio2_subdev_get_fmt(sd, cfg, fmt);
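+
+ /* The ACTIVE format in q->subdev_fmt is shared with cio2_subdev_get_fmt(); serialise the update */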
+ mutex_lock(&q->subdev_lock);
+
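+ /* TRY formats live in the pad config (cfg); the ACTIVE format is q->subdev_fmt */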
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
*v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
} else {
/* It's the sink, allow changing frame size */
q->subdev_fmt.width = fmt->format.width;
q->subdev_fmt.height = fmt->format.height;
q->subdev_fmt.code = fmt->format.code;
fmt->format = q->subdev_fmt;
}
+ mutex_unlock(&q->subdev_lock);
+
return 0;
}
@@ ... @@ static int cio2_queue_init(...)
/* Initialize miscellaneous variables */
mutex_init(&q->lock);
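+ /* subdev_lock serialises access to the active subdev format, subdev_fmt */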
+ mutex_init(&q->subdev_lock);
/* Initialize formats to default values */
fmt = &q->subdev_fmt;
@@ ... @@ static int cio2_queue_init(...)
fail_subdev_media_entity:
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
+ mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
return r;
@@ ... @@ static void cio2_queue_exit(...)
v4l2_device_unregister_subdev(&q->subdev);
media_entity_cleanup(&q->subdev.entity);
cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
}
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ ... @@ struct cio2_queue {
/* Subdev, /dev/v4l-subdevX */
struct v4l2_subdev subdev;
+ struct mutex subdev_lock; /* Serialise access to the subdev_fmt field */
struct media_pad subdev_pads[CIO2_PADS];
struct v4l2_mbus_framefmt subdev_fmt;
atomic_t frame_sequence;