2 * Samsung TV Mixer driver
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
10 * by the Free Software Foundation, either version 2 of the License,
11 * or (at your option) any later version
14 #define pr_fmt(fmt) "s5p-tv (mixer): " fmt
18 #include <media/v4l2-ioctl.h>
19 #include <linux/videodev2.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/timer.h>
24 #include <media/videobuf2-dma-contig.h>
26 static int find_reg_callback(struct device *dev, void *p)
28 struct v4l2_subdev **sd = p;
30 *sd = dev_get_drvdata(dev);
31 /* non-zero value stops iteration */
35 static struct v4l2_subdev *find_and_register_subdev(
36 struct mxr_device *mdev, char *module_name)
38 struct device_driver *drv;
39 struct v4l2_subdev *sd = NULL;
42 /* TODO: add waiting until probe is finished */
43 drv = driver_find(module_name, &platform_bus_type);
45 mxr_warn(mdev, "module %s is missing\n", module_name);
48 /* driver refcnt is increased, it is safe to iterate over devices */
49 ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
50 /* ret == 0 means that find_reg_callback was never executed */
52 mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
55 /* v4l2_device_register_subdev detects if sd is NULL */
56 ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
58 mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
66 int mxr_acquire_video(struct mxr_device *mdev,
67 struct mxr_output_conf *output_conf, int output_count)
69 struct device *dev = mdev->dev;
70 struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
73 struct v4l2_subdev *sd;
75 strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
76 /* prepare context for V4L2 device */
77 ret = v4l2_device_register(dev, v4l2_dev);
79 mxr_err(mdev, "could not register v4l2 device.\n");
83 mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
84 if (IS_ERR(mdev->alloc_ctx)) {
85 mxr_err(mdev, "could not acquire vb2 allocator\n");
86 ret = PTR_ERR(mdev->alloc_ctx);
90 /* registering outputs */
92 for (i = 0; i < output_count; ++i) {
93 struct mxr_output_conf *conf = &output_conf[i];
94 struct mxr_output *out;
96 sd = find_and_register_subdev(mdev, conf->module_name);
97 /* on failure, try to register the next output */
100 out = kzalloc(sizeof(*out), GFP_KERNEL);
102 mxr_err(mdev, "no memory for '%s'\n",
105 /* registered subdevs are removed in fail_v4l2_dev */
108 strlcpy(out->name, conf->output_name, sizeof(out->name));
110 out->cookie = conf->cookie;
111 mdev->output[mdev->output_cnt++] = out;
112 mxr_info(mdev, "added output '%s' from module '%s'\n",
113 conf->output_name, conf->module_name);
114 /* checking if maximal number of outputs is reached */
115 if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
119 if (mdev->output_cnt == 0) {
120 mxr_err(mdev, "failed to register any output\n");
122 /* skipping fail_output because there is nothing to free */
123 goto fail_vb2_allocator;
129 /* kfree is NULL-safe */
130 for (i = 0; i < mdev->output_cnt; ++i)
131 kfree(mdev->output[i]);
132 memset(mdev->output, 0, sizeof(mdev->output));
135 /* freeing allocator context */
136 vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
139 /* NOTE: this automatically unregisters all subdevs */
140 v4l2_device_unregister(v4l2_dev);
146 void mxr_release_video(struct mxr_device *mdev)
150 /* kfree is NULL-safe */
151 for (i = 0; i < mdev->output_cnt; ++i)
152 kfree(mdev->output[i]);
154 vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
155 v4l2_device_unregister(&mdev->v4l2_dev);
158 static int mxr_querycap(struct file *file, void *priv,
159 struct v4l2_capability *cap)
161 struct mxr_layer *layer = video_drvdata(file);
163 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
165 strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
166 strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
167 snprintf(cap->bus_info, sizeof(cap->bus_info), "%d", layer->idx);
168 cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
169 cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
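/* V4L2_CAP_DEVICE_CAPS signals that device_caps describes this particular node */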
174 static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
176 mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
177 geo->src.full_width, geo->src.full_height);
178 mxr_dbg(mdev, "src.size = (%u, %u)\n",
179 geo->src.width, geo->src.height);
180 mxr_dbg(mdev, "src.offset = (%u, %u)\n",
181 geo->src.x_offset, geo->src.y_offset);
182 mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
183 geo->dst.full_width, geo->dst.full_height);
184 mxr_dbg(mdev, "dst.size = (%u, %u)\n",
185 geo->dst.width, geo->dst.height);
186 mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
187 geo->dst.x_offset, geo->dst.y_offset);
188 mxr_dbg(mdev, "ratio = (%u, %u)\n",
189 geo->x_ratio, geo->y_ratio);
192 static void mxr_layer_default_geo(struct mxr_layer *layer)
194 struct mxr_device *mdev = layer->mdev;
195 struct v4l2_mbus_framefmt mbus_fmt;
197 memset(&layer->geo, 0, sizeof(layer->geo));
199 mxr_get_mbus_fmt(mdev, &mbus_fmt);
201 layer->geo.dst.full_width = mbus_fmt.width;
202 layer->geo.dst.full_height = mbus_fmt.height;
203 layer->geo.dst.width = layer->geo.dst.full_width;
204 layer->geo.dst.height = layer->geo.dst.full_height;
205 layer->geo.dst.field = mbus_fmt.field;
207 layer->geo.src.full_width = mbus_fmt.width;
208 layer->geo.src.full_height = mbus_fmt.height;
209 layer->geo.src.width = layer->geo.src.full_width;
210 layer->geo.src.height = layer->geo.src.full_height;
212 mxr_geometry_dump(mdev, &layer->geo);
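/* let the layer's fix_geometry callback adjust the sink geometry to what its hardware supports */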
213 layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
214 mxr_geometry_dump(mdev, &layer->geo);
217 static void mxr_layer_update_output(struct mxr_layer *layer)
219 struct mxr_device *mdev = layer->mdev;
220 struct v4l2_mbus_framefmt mbus_fmt;
222 mxr_get_mbus_fmt(mdev, &mbus_fmt);
223 /* checking if update is needed */
224 if (layer->geo.dst.full_width == mbus_fmt.width &&
225 layer->geo.dst.full_height == mbus_fmt.height)
228 layer->geo.dst.full_width = mbus_fmt.width;
229 layer->geo.dst.full_height = mbus_fmt.height;
230 layer->geo.dst.field = mbus_fmt.field;
231 layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
233 mxr_geometry_dump(mdev, &layer->geo);
236 static const struct mxr_format *find_format_by_fourcc(
237 struct mxr_layer *layer, unsigned long fourcc);
238 static const struct mxr_format *find_format_by_index(
239 struct mxr_layer *layer, unsigned long index);
241 static int mxr_enum_fmt(struct file *file, void *priv,
242 struct v4l2_fmtdesc *f)
244 struct mxr_layer *layer = video_drvdata(file);
245 struct mxr_device *mdev = layer->mdev;
246 const struct mxr_format *fmt;
248 mxr_dbg(mdev, "%s\n", __func__);
249 fmt = find_format_by_index(layer, f->index);
253 strlcpy(f->description, fmt->name, sizeof(f->description));
254 f->pixelformat = fmt->fourcc;
259 static unsigned int divup(unsigned int dividend, unsigned int divisor)
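/* division rounding up, e.g. divup(1080, 16) == 68 */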
261 return (dividend + divisor - 1) / divisor;
264 unsigned long mxr_get_plane_size(const struct mxr_block *blk,
265 unsigned int width, unsigned int height)
267 unsigned int bl_width = divup(width, blk->width);
268 unsigned int bl_height = divup(height, blk->height);
270 return bl_width * bl_height * blk->size;
273 static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
274 const struct mxr_format *fmt, u32 width, u32 height)
278 /* checking if nothing to fill */
282 memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
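/* planes sharing one memory subframe (plane2subframe map) have their sizes summed and take the widest bytesperline */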
283 for (i = 0; i < fmt->num_planes; ++i) {
284 struct v4l2_plane_pix_format *plane = planes
285 + fmt->plane2subframe[i];
286 const struct mxr_block *blk = &fmt->plane[i];
287 u32 bl_width = divup(width, blk->width);
288 u32 bl_height = divup(height, blk->height);
289 u32 sizeimage = bl_width * bl_height * blk->size;
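/* a row of blocks spans blk->height image lines, hence the division below */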
290 u16 bytesperline = bl_width * blk->size / blk->height;
292 plane->sizeimage += sizeimage;
293 plane->bytesperline = max(plane->bytesperline, bytesperline);
297 static int mxr_g_fmt(struct file *file, void *priv,
298 struct v4l2_format *f)
300 struct mxr_layer *layer = video_drvdata(file);
301 struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
303 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
305 pix->width = layer->geo.src.full_width;
306 pix->height = layer->geo.src.full_height;
307 pix->field = V4L2_FIELD_NONE;
308 pix->pixelformat = layer->fmt->fourcc;
309 pix->colorspace = layer->fmt->colorspace;
310 mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
315 static int mxr_s_fmt(struct file *file, void *priv,
316 struct v4l2_format *f)
318 struct mxr_layer *layer = video_drvdata(file);
319 const struct mxr_format *fmt;
320 struct v4l2_pix_format_mplane *pix;
321 struct mxr_device *mdev = layer->mdev;
322 struct mxr_geometry *geo = &layer->geo;
324 mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
326 pix = &f->fmt.pix_mp;
327 fmt = find_format_by_fourcc(layer, pix->pixelformat);
329 mxr_warn(mdev, "not recognized fourcc: %08x\n",
334 /* set source size to highest accepted value */
335 geo->src.full_width = max(geo->dst.full_width, pix->width);
336 geo->src.full_height = max(geo->dst.full_height, pix->height);
337 layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
338 mxr_geometry_dump(mdev, &layer->geo);
339 /* set cropping to total visible screen */
340 geo->src.width = pix->width;
341 geo->src.height = pix->height;
342 geo->src.x_offset = 0;
343 geo->src.y_offset = 0;
344 /* assure consistency of geometry */
345 layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
346 mxr_geometry_dump(mdev, &layer->geo);
347 /* set full size to lowest possible value */
348 geo->src.full_width = 0;
349 geo->src.full_height = 0;
350 layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
351 mxr_geometry_dump(mdev, &layer->geo);
353 /* returning results */
354 mxr_g_fmt(file, priv, f);
359 static int mxr_g_selection(struct file *file, void *fh,
360 struct v4l2_selection *s)
362 struct mxr_layer *layer = video_drvdata(file);
363 struct mxr_geometry *geo = &layer->geo;
365 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
367 if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
368 s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
372 case V4L2_SEL_TGT_CROP:
373 s->r.left = geo->src.x_offset;
374 s->r.top = geo->src.y_offset;
375 s->r.width = geo->src.width;
376 s->r.height = geo->src.height;
378 case V4L2_SEL_TGT_CROP_DEFAULT:
379 case V4L2_SEL_TGT_CROP_BOUNDS:
382 s->r.width = geo->src.full_width;
383 s->r.height = geo->src.full_height;
385 case V4L2_SEL_TGT_COMPOSE:
386 case V4L2_SEL_TGT_COMPOSE_PADDED:
387 s->r.left = geo->dst.x_offset;
388 s->r.top = geo->dst.y_offset;
389 s->r.width = geo->dst.width;
390 s->r.height = geo->dst.height;
392 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
393 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
396 s->r.width = geo->dst.full_width;
397 s->r.height = geo->dst.full_height;
406 /* returns 1 if rectangle 'a' is inside 'b' */
407 static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
409 if (a->left < b->left)
413 if (a->left + a->width > b->left + b->width)
415 if (a->top + a->height > b->top + b->height)
420 static int mxr_s_selection(struct file *file, void *fh,
421 struct v4l2_selection *s)
423 struct mxr_layer *layer = video_drvdata(file);
424 struct mxr_geometry *geo = &layer->geo;
425 struct mxr_crop *target = NULL;
426 enum mxr_geometry_stage stage;
427 struct mxr_geometry tmp;
428 struct v4l2_rect res;
430 memset(&res, 0, sizeof(res));
432 mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
433 s->r.width, s->r.height, s->r.left, s->r.top);
435 if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
436 s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
440 /* ignore read-only targets */
441 case V4L2_SEL_TGT_CROP_DEFAULT:
442 case V4L2_SEL_TGT_CROP_BOUNDS:
443 res.width = geo->src.full_width;
444 res.height = geo->src.full_height;
447 /* ignore read-only targets */
448 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
449 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
450 res.width = geo->dst.full_width;
451 res.height = geo->dst.full_height;
454 case V4L2_SEL_TGT_CROP:
456 stage = MXR_GEOMETRY_CROP;
458 case V4L2_SEL_TGT_COMPOSE:
459 case V4L2_SEL_TGT_COMPOSE_PADDED:
461 stage = MXR_GEOMETRY_COMPOSE;
466 /* apply change and update geometry if needed */
468 /* back up the current geometry so it can be restored if setup fails */
469 memcpy(&tmp, geo, sizeof(tmp));
471 /* apply requested selection */
472 target->x_offset = s->r.left;
473 target->y_offset = s->r.top;
474 target->width = s->r.width;
475 target->height = s->r.height;
477 layer->ops.fix_geometry(layer, stage, s->flags);
479 /* retrieve the updated selection rectangle */
480 res.left = target->x_offset;
481 res.top = target->y_offset;
482 res.width = target->width;
483 res.height = target->height;
485 mxr_geometry_dump(layer->mdev, &layer->geo);
488 /* checking if the rectangle satisfies constraints */
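/* V4L2_SEL_FLAG_LE requires the result to fit inside the requested rectangle, V4L2_SEL_FLAG_GE requires it to cover the requested rectangle */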
489 if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
491 if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
494 /* return result rectangle */
499 /* restore old geometry, which is not touched if target is NULL */
501 memcpy(geo, &tmp, sizeof(tmp));
505 static int mxr_enum_dv_timings(struct file *file, void *fh,
506 struct v4l2_enum_dv_timings *timings)
508 struct mxr_layer *layer = video_drvdata(file);
509 struct mxr_device *mdev = layer->mdev;
512 /* the mutex protects sd_out from being changed */
513 mutex_lock(&mdev->mutex);
514 ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_timings, timings);
515 mutex_unlock(&mdev->mutex);
517 return ret ? -EINVAL : 0;
520 static int mxr_s_dv_timings(struct file *file, void *fh,
521 struct v4l2_dv_timings *timings)
523 struct mxr_layer *layer = video_drvdata(file);
524 struct mxr_device *mdev = layer->mdev;
527 /* the mutex protects sd_out from being changed */
528 mutex_lock(&mdev->mutex);
530 /* the timings cannot be changed while an entity
531 * depends on the output configuration
533 if (mdev->n_output > 0) {
534 mutex_unlock(&mdev->mutex);
538 ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);
540 mutex_unlock(&mdev->mutex);
542 mxr_layer_update_output(layer);
544 /* any failure should return EINVAL according to V4L2 doc */
545 return ret ? -EINVAL : 0;
548 static int mxr_g_dv_timings(struct file *file, void *fh,
549 struct v4l2_dv_timings *timings)
551 struct mxr_layer *layer = video_drvdata(file);
552 struct mxr_device *mdev = layer->mdev;
555 /* the mutex protects sd_out from being changed */
556 mutex_lock(&mdev->mutex);
557 ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
558 mutex_unlock(&mdev->mutex);
560 return ret ? -EINVAL : 0;
563 static int mxr_dv_timings_cap(struct file *file, void *fh,
564 struct v4l2_dv_timings_cap *cap)
566 struct mxr_layer *layer = video_drvdata(file);
567 struct mxr_device *mdev = layer->mdev;
570 /* the mutex protects sd_out from being changed */
571 mutex_lock(&mdev->mutex);
572 ret = v4l2_subdev_call(to_outsd(mdev), video, dv_timings_cap, cap);
573 mutex_unlock(&mdev->mutex);
575 return ret ? -EINVAL : 0;
578 static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
580 struct mxr_layer *layer = video_drvdata(file);
581 struct mxr_device *mdev = layer->mdev;
584 /* the mutex protects sd_out from being changed */
585 mutex_lock(&mdev->mutex);
587 /* the standard cannot be changed while an entity
588 * depends on the output configuration
590 if (mdev->n_output > 0) {
591 mutex_unlock(&mdev->mutex);
595 ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);
597 mutex_unlock(&mdev->mutex);
599 mxr_layer_update_output(layer);
601 return ret ? -EINVAL : 0;
604 static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
606 struct mxr_layer *layer = video_drvdata(file);
607 struct mxr_device *mdev = layer->mdev;
610 /* the mutex protects sd_out from being changed */
611 mutex_lock(&mdev->mutex);
612 ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
613 mutex_unlock(&mdev->mutex);
615 return ret ? -EINVAL : 0;
618 static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
620 struct mxr_layer *layer = video_drvdata(file);
621 struct mxr_device *mdev = layer->mdev;
622 struct mxr_output *out;
623 struct v4l2_subdev *sd;
625 if (a->index >= mdev->output_cnt)
627 out = mdev->output[a->index];
630 strlcpy(a->name, out->name, sizeof(a->name));
632 /* try to obtain supported tv norms */
633 v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
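/* if the subdev lacks g_tvnorms_output, a->std is simply left untouched */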
635 if (sd->ops->video && sd->ops->video->s_dv_timings)
636 a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
637 if (sd->ops->video && sd->ops->video->s_std_output)
638 a->capabilities |= V4L2_OUT_CAP_STD;
639 a->type = V4L2_OUTPUT_TYPE_ANALOG;
644 static int mxr_s_output(struct file *file, void *fh, unsigned int i)
646 struct video_device *vfd = video_devdata(file);
647 struct mxr_layer *layer = video_drvdata(file);
648 struct mxr_device *mdev = layer->mdev;
650 if (i >= mdev->output_cnt || mdev->output[i] == NULL)
653 mutex_lock(&mdev->mutex);
654 if (mdev->n_output > 0) {
655 mutex_unlock(&mdev->mutex);
658 mdev->current_output = i;
660 v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
662 mutex_unlock(&mdev->mutex);
664 /* update the layer's geometry */
665 mxr_layer_update_output(layer);
667 mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
672 static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
674 struct mxr_layer *layer = video_drvdata(file);
675 struct mxr_device *mdev = layer->mdev;
677 mutex_lock(&mdev->mutex);
678 *p = mdev->current_output;
679 mutex_unlock(&mdev->mutex);
684 static int mxr_reqbufs(struct file *file, void *priv,
685 struct v4l2_requestbuffers *p)
687 struct mxr_layer *layer = video_drvdata(file);
689 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
690 return vb2_reqbufs(&layer->vb_queue, p);
693 static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
695 struct mxr_layer *layer = video_drvdata(file);
697 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
698 return vb2_querybuf(&layer->vb_queue, p);
701 static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
703 struct mxr_layer *layer = video_drvdata(file);
705 mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
706 return vb2_qbuf(&layer->vb_queue, p);
709 static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
711 struct mxr_layer *layer = video_drvdata(file);
713 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
714 return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
717 static int mxr_expbuf(struct file *file, void *priv,
718 struct v4l2_exportbuffer *eb)
720 struct mxr_layer *layer = video_drvdata(file);
722 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
723 return vb2_expbuf(&layer->vb_queue, eb);
726 static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
728 struct mxr_layer *layer = video_drvdata(file);
730 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
731 return vb2_streamon(&layer->vb_queue, i);
734 static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
736 struct mxr_layer *layer = video_drvdata(file);
738 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
739 return vb2_streamoff(&layer->vb_queue, i);
742 static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
743 .vidioc_querycap = mxr_querycap,
744 /* format handling */
745 .vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
746 .vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
747 .vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
749 .vidioc_reqbufs = mxr_reqbufs,
750 .vidioc_querybuf = mxr_querybuf,
751 .vidioc_qbuf = mxr_qbuf,
752 .vidioc_dqbuf = mxr_dqbuf,
753 .vidioc_expbuf = mxr_expbuf,
754 /* Streaming control */
755 .vidioc_streamon = mxr_streamon,
756 .vidioc_streamoff = mxr_streamoff,
757 /* DV Timings functions */
758 .vidioc_enum_dv_timings = mxr_enum_dv_timings,
759 .vidioc_s_dv_timings = mxr_s_dv_timings,
760 .vidioc_g_dv_timings = mxr_g_dv_timings,
761 .vidioc_dv_timings_cap = mxr_dv_timings_cap,
762 /* analog TV standard functions */
763 .vidioc_s_std = mxr_s_std,
764 .vidioc_g_std = mxr_g_std,
765 /* Output handling */
766 .vidioc_enum_output = mxr_enum_output,
767 .vidioc_s_output = mxr_s_output,
768 .vidioc_g_output = mxr_g_output,
769 /* selection ioctls */
770 .vidioc_g_selection = mxr_g_selection,
771 .vidioc_s_selection = mxr_s_selection,
774 static int mxr_video_open(struct file *file)
776 struct mxr_layer *layer = video_drvdata(file);
777 struct mxr_device *mdev = layer->mdev;
780 mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
781 if (mutex_lock_interruptible(&layer->mutex))
783 /* assure device probe is finished */
784 wait_for_device_probe();
785 /* creating context for file descriptor */
786 ret = v4l2_fh_open(file);
788 mxr_err(mdev, "v4l2_fh_open failed\n");
792 /* leaving if layer is already initialized */
793 if (!v4l2_fh_is_singular_file(file))
796 /* FIXME: should power be enabled on open? */
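/* only the first open of the node powers the device and initializes the vb2 queue */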
797 ret = mxr_power_get(mdev);
799 mxr_err(mdev, "power on failed\n");
803 ret = vb2_queue_init(&layer->vb_queue);
805 mxr_err(mdev, "failed to initialize vb2 queue\n");
808 /* set default format, first on the list */
809 layer->fmt = layer->fmt_array[0];
810 /* setup default geometry */
811 mxr_layer_default_geo(layer);
812 mutex_unlock(&layer->mutex);
820 v4l2_fh_release(file);
823 mutex_unlock(&layer->mutex);
829 mxr_video_poll(struct file *file, struct poll_table_struct *wait)
831 struct mxr_layer *layer = video_drvdata(file);
834 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
836 mutex_lock(&layer->mutex);
837 res = vb2_poll(&layer->vb_queue, file, wait);
838 mutex_unlock(&layer->mutex);
842 static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
844 struct mxr_layer *layer = video_drvdata(file);
847 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
849 if (mutex_lock_interruptible(&layer->mutex))
851 ret = vb2_mmap(&layer->vb_queue, vma);
852 mutex_unlock(&layer->mutex);
856 static int mxr_video_release(struct file *file)
858 struct mxr_layer *layer = video_drvdata(file);
860 mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
861 mutex_lock(&layer->mutex);
862 if (v4l2_fh_is_singular_file(file)) {
863 vb2_queue_release(&layer->vb_queue);
864 mxr_power_put(layer->mdev);
866 v4l2_fh_release(file);
867 mutex_unlock(&layer->mutex);
871 static const struct v4l2_file_operations mxr_fops = {
872 .owner = THIS_MODULE,
873 .open = mxr_video_open,
874 .poll = mxr_video_poll,
875 .mmap = mxr_video_mmap,
876 .release = mxr_video_release,
877 .unlocked_ioctl = video_ioctl2,
880 static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
881 unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
884 struct mxr_layer *layer = vb2_get_drv_priv(vq);
885 const struct mxr_format *fmt = layer->fmt;
887 struct mxr_device *mdev = layer->mdev;
888 struct v4l2_plane_pix_format planes[3];
890 mxr_dbg(mdev, "%s\n", __func__);
891 /* checking if format was configured */
894 mxr_dbg(mdev, "fmt = %s\n", fmt->name);
895 mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
896 layer->geo.src.full_height);
898 *nplanes = fmt->num_subframes;
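/* one vb2 plane per memory subframe; all planes come from the shared dma-contig allocator context */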
899 for (i = 0; i < fmt->num_subframes; ++i) {
900 alloc_ctxs[i] = layer->mdev->alloc_ctx;
901 sizes[i] = planes[i].sizeimage;
902 mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
911 static void buf_queue(struct vb2_buffer *vb)
913 struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
914 struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
915 struct mxr_device *mdev = layer->mdev;
918 spin_lock_irqsave(&layer->enq_slock, flags);
919 list_add_tail(&buffer->list, &layer->enq_list);
920 spin_unlock_irqrestore(&layer->enq_slock, flags);
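/* the buffer stays on enq_list until the mixer interrupt handler takes it for display */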
922 mxr_dbg(mdev, "queuing buffer\n");
925 static void wait_lock(struct vb2_queue *vq)
927 struct mxr_layer *layer = vb2_get_drv_priv(vq);
929 mxr_dbg(layer->mdev, "%s\n", __func__);
930 mutex_lock(&layer->mutex);
933 static void wait_unlock(struct vb2_queue *vq)
935 struct mxr_layer *layer = vb2_get_drv_priv(vq);
937 mxr_dbg(layer->mdev, "%s\n", __func__);
938 mutex_unlock(&layer->mutex);
941 static int start_streaming(struct vb2_queue *vq, unsigned int count)
943 struct mxr_layer *layer = vb2_get_drv_priv(vq);
944 struct mxr_device *mdev = layer->mdev;
947 mxr_dbg(mdev, "%s\n", __func__);
950 mxr_dbg(mdev, "no output buffers queued\n");
954 /* block any changes in output configuration */
955 mxr_output_get(mdev);
957 mxr_layer_update_output(layer);
958 layer->ops.format_set(layer);
959 /* enabling layer in hardware */
960 spin_lock_irqsave(&layer->enq_slock, flags);
961 layer->state = MXR_LAYER_STREAMING;
962 spin_unlock_irqrestore(&layer->enq_slock, flags);
964 layer->ops.stream_set(layer, MXR_ENABLE);
965 mxr_streamer_get(mdev);
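/* take a streamer reference; the mixer core starts the hardware when the first layer begins streaming */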
970 static void mxr_watchdog(unsigned long arg)
972 struct mxr_layer *layer = (struct mxr_layer *) arg;
973 struct mxr_device *mdev = layer->mdev;
976 mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
978 spin_lock_irqsave(&layer->enq_slock, flags);
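/* force-complete buffers still held by the hardware so vb2_wait_for_all_buffers() in stop_streaming() can return */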
980 if (layer->update_buf == layer->shadow_buf)
981 layer->update_buf = NULL;
982 if (layer->update_buf) {
983 vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
984 layer->update_buf = NULL;
986 if (layer->shadow_buf) {
987 vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
988 layer->shadow_buf = NULL;
990 spin_unlock_irqrestore(&layer->enq_slock, flags);
993 static int stop_streaming(struct vb2_queue *vq)
995 struct mxr_layer *layer = vb2_get_drv_priv(vq);
996 struct mxr_device *mdev = layer->mdev;
998 struct timer_list watchdog;
999 struct mxr_buffer *buf, *buf_tmp;
1001 mxr_dbg(mdev, "%s\n", __func__);
1003 spin_lock_irqsave(&layer->enq_slock, flags);
1006 layer->state = MXR_LAYER_STREAMING_FINISH;
1008 /* mark all enqueued buffers as done */
1009 list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
1010 list_del(&buf->list);
1011 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
1014 spin_unlock_irqrestore(&layer->enq_slock, flags);
1016 /* give the hardware 1 second to complete the last buffers */
1017 setup_timer_on_stack(&watchdog, mxr_watchdog,
1018 (unsigned long)layer);
1019 mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
1021 /* wait until all buffers reach the done state */
1022 vb2_wait_for_all_buffers(vq);
1024 /* stop timer if all synchronization is done */
1025 del_timer_sync(&watchdog);
1026 destroy_timer_on_stack(&watchdog);
1028 /* stopping hardware */
1029 spin_lock_irqsave(&layer->enq_slock, flags);
1030 layer->state = MXR_LAYER_IDLE;
1031 spin_unlock_irqrestore(&layer->enq_slock, flags);
1033 /* disabling layer in hardware */
1034 layer->ops.stream_set(layer, MXR_DISABLE);
1035 /* remove one streamer */
1036 mxr_streamer_put(mdev);
1037 /* allow changes in output configuration */
1038 mxr_output_put(mdev);
1042 static struct vb2_ops mxr_video_qops = {
1043 .queue_setup = queue_setup,
1044 .buf_queue = buf_queue,
1045 .wait_prepare = wait_unlock,
1046 .wait_finish = wait_lock,
1047 .start_streaming = start_streaming,
1048 .stop_streaming = stop_streaming,
1051 /* FIXME: try to move these functions into mxr_base_layer_create */
1052 int mxr_base_layer_register(struct mxr_layer *layer)
1054 struct mxr_device *mdev = layer->mdev;
1057 ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
1059 mxr_err(mdev, "failed to register video device\n");
1061 mxr_info(mdev, "registered layer %s as /dev/video%d\n",
1062 layer->vfd.name, layer->vfd.num);
1066 void mxr_base_layer_unregister(struct mxr_layer *layer)
1068 video_unregister_device(&layer->vfd);
1071 void mxr_layer_release(struct mxr_layer *layer)
1073 if (layer->ops.release)
1074 layer->ops.release(layer);
1077 void mxr_base_layer_release(struct mxr_layer *layer)
1082 static void mxr_vfd_release(struct video_device *vdev)
1084 pr_info("video device release\n");
1087 struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
1088 int idx, char *name, struct mxr_layer_ops *ops)
1090 struct mxr_layer *layer;
1092 layer = kzalloc(sizeof(*layer), GFP_KERNEL);
1093 if (layer == NULL) {
1094 mxr_err(mdev, "not enough memory for layer.\n");
1102 spin_lock_init(&layer->enq_slock);
1103 INIT_LIST_HEAD(&layer->enq_list);
1104 mutex_init(&layer->mutex);
1106 layer->vfd = (struct video_device) {
1108 .release = mxr_vfd_release,
1110 .vfl_dir = VFL_DIR_TX,
1111 .ioctl_ops = &mxr_ioctl_ops,
1113 strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
1114 /* let framework control PRIORITY */
1115 set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
1117 video_set_drvdata(&layer->vfd, layer);
1118 layer->vfd.lock = &layer->mutex;
1119 layer->vfd.v4l2_dev = &mdev->v4l2_dev;
1121 layer->vb_queue = (struct vb2_queue) {
1122 .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
1123 .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
1125 .buf_struct_size = sizeof(struct mxr_buffer),
1126 .ops = &mxr_video_qops,
1127 .mem_ops = &vb2_dma_contig_memops,
1136 static const struct mxr_format *find_format_by_fourcc(
1137 struct mxr_layer *layer, unsigned long fourcc)
1141 for (i = 0; i < layer->fmt_array_size; ++i)
1142 if (layer->fmt_array[i]->fourcc == fourcc)
1143 return layer->fmt_array[i];
1147 static const struct mxr_format *find_format_by_index(
1148 struct mxr_layer *layer, unsigned long index)
1150 if (index >= layer->fmt_array_size)
1152 return layer->fmt_array[index];