2 * Samsung S5P Multi Format Codec v 5.1
4 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
5 * Kamil Debski, <k.debski@samsung.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
16 #include <linux/sched.h>
17 #include <linux/clk.h>
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/platform_device.h>
21 #include <linux/version.h>
22 #include <linux/workqueue.h>
23 #include <linux/videodev2.h>
24 #include <media/videobuf2-core.h>
28 #include "s5p_mfc_opr.h"
29 #include "s5p_mfc_intr.h"
30 #include "s5p_mfc_mem.h"
31 #include "s5p_mfc_debug.h"
32 #include "s5p_mfc_reg.h"
33 #include "s5p_mfc_shm.h"
34 #include "s5p_mfc_ctrl.h"
35 #include "s5p_mfc_dec.h"
36 #include "s5p_mfc_enc.h"
37 #include "s5p_mfc_pm.h"
/* Driver name and the two video-node names exposed to userspace */
39 #define S5P_MFC_NAME "s5p-mfc"
40 #define S5P_MFC_DEC_NAME "s5p-mfc-dec"
41 #define S5P_MFC_ENC_NAME "s5p-mfc-enc"
/* Debug verbosity is tunable at runtime through sysfs (root-writable) */
44 module_param(debug, int, S_IRUGO | S_IWUSR);
46 /* Helper functions for interrupt processing */
47 /* Remove from hw execution round robin */
/* Remove this context from the hardware round-robin scheduling mask
 * so s5p_mfc_try_run() will not select it for the next HW run. */
48 inline void clear_work_bit(struct s5p_mfc_ctx *ctx)
50 struct s5p_mfc_dev *dev = ctx->dev;
/* condlock serializes all accesses to ctx_work_bits */
52 spin_lock(&dev->condlock);
53 clear_bit(ctx->num, &dev->ctx_work_bits);
54 spin_unlock(&dev->condlock);
57 /* Wake up context wait_queue */
/* Record the interrupt reason in the context, then release any waiter
 * sleeping on ctx->queue (see s5p_mfc_wait_for_done_ctx). */
58 static inline void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
62 ctx->int_type = reason;
/* NOTE(review): aborting contexts are deliberately not woken here --
 * confirm against the abort path in the full file. */
64 if (ctx->state != MFCINST_ABORT)
65 wake_up_interruptible(&ctx->queue);
70 /* Wake up device wait_queue */
/* Device-level counterpart of wake_up_ctx(): stores the interrupt
 * reason and releases waiters on dev->queue unconditionally. */
71 static inline void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
75 dev->int_type = reason;
77 wake_up_interruptible(&dev->queue);
/* Timer callback, re-armed every MFC_WATCHDOG_INTERVAL ms.  Counts
 * consecutive intervals during which the HW lock stayed held with no
 * completion interrupt; after MFC_WATCHDOG_CNT such intervals the HW
 * is assumed hung and recovery is deferred to the watchdog workqueue
 * (which may sleep, unlike this timer context). */
80 void s5p_mfc_watchdog(unsigned long arg)
82 struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
/* hw_lock bit 0 set means a HW command is still outstanding */
84 if (test_bit(0, &dev->hw_lock))
85 atomic_inc(&dev->watchdog_cnt)
86 if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
87 /* This means that hw is busy and no interrupts were
88 * generated by hw for the Nth time of running this
89 * watchdog timer. This usually means a serious hw
90 * error. Now it is time to kill all instances and
92 mfc_err("Time out during waiting for HW.\n");
93 queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
/* Re-arm for the next interval */
95 dev->watchdog_timer.expires = jiffies +
96 msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
97 add_timer(&dev->watchdog_timer);
/* Workqueue handler for a HW timeout detected by s5p_mfc_watchdog():
 * marks every open context as errored, flushes its buffer queues,
 * releases the HW lock and, if any instance is still open, reloads
 * firmware and reinitializes the hardware. */
100 static void s5p_mfc_watchdog_worker(struct work_struct *work)
102 struct s5p_mfc_dev *dev;
103 struct s5p_mfc_ctx *ctx;
108 dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
110 mfc_err("Driver timeout error handling.\n");
111 /* Lock the mutex that protects open and release.
112 * This is necessary as they may load and unload firmware. */
/* trylock: recovery proceeds even if open/release holds the mutex,
 * hence the warning below rather than blocking. */
113 mutex_locked = mutex_trylock(&dev->mfc_mutex);
115 mfc_err("This is not good. Some instance may be "
116 "closing/opening.\n");
117 spin_lock_irqsave(&dev->irqlock, flags);
/* Fail every context and return all queued buffers to userspace */
122 for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
125 ctx->state = MFCINST_ERROR;
126 s5p_mfc_cleanup_queue(&ctx->dst_queue,
128 s5p_mfc_cleanup_queue(&ctx->src_queue,
131 wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
134 clear_bit(0, &dev->hw_lock);
135 spin_unlock_irqrestore(&dev->irqlock, flags);
136 /* Double check if there is at least one instance running.
137 * If no instance is in memory than no firmware should be present */
138 if (dev->num_inst > 0) {
139 ret = s5p_mfc_load_firmware(dev);
141 mfc_err("Failed to reload FW.\n");
/* NOTE(review): the unlock appears on both error paths and at the
 * end; confirm mutex_locked is checked before each unlock in the
 * full file (this view is elided). */
143 mutex_unlock(&dev->mfc_mutex);
147 ret = s5p_mfc_init_hw(dev);
149 mfc_err("Failed to reinit FW.\n");
151 mutex_unlock(&dev->mfc_mutex);
156 mutex_unlock(&dev->mfc_mutex);
/* Workqueue handler that gates the MFC clock off outside IRQ context.
 * NOTE(review): only the clk_state early-out is visible here; the
 * actual clock-off call is elided from this view -- confirm. */
159 void mfc_workqueue_clock_off(struct work_struct *work)
161 struct s5p_mfc_dev *dev = container_of(work, struct s5p_mfc_dev,
/* Nothing to do if the clock is already marked off */
164 if (test_bit(0, &dev->clk_state) == 0)
/* Identify whether the file was opened via the decoder or encoder
 * video node.  Node index 0 is the decoder, index 1 the encoder;
 * anything else (or a missing video_device) is invalid. */
168 static inline enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
170 struct video_device *vdev = video_devdata(file);
173 mfc_err("failed to get video_device");
174 return MFCNODE_INVALID;
177 mfc_debug(2, "video_device index: %d\n", vdev->index);
179 if (vdev->index == 0)
180 return MFCNODE_DECODER;
181 else if (vdev->index == 1)
182 return MFCNODE_ENCODER;
184 return MFCNODE_INVALID;
/* Called when the HW reports its decoding buffer empty: every
 * remaining display buffer is drained back to userspace with zero
 * payload and the context is moved to MFCINST_FINISHED. */
187 static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
189 struct s5p_mfc_buf *dst_buf;
191 ctx->state = MFCINST_FINISHED;
192 mfc_debug(2, "Decided to finish\n");
194 while (!list_empty(&ctx->dst_queue)) {
195 dst_buf = list_entry(ctx->dst_queue.next,
196 struct s5p_mfc_buf, list);
197 mfc_debug(2, "Cleaning up buffer: %d\n",
198 dst_buf->b->v4l2_buf.index);
/* Zero payload on both planes: no image data in these buffers */
199 vb2_set_plane_payload(dst_buf->b, 0, 0);
200 vb2_set_plane_payload(dst_buf->b, 1, 0);
201 list_del(&dst_buf->list);
202 ctx->dst_queue_cnt--;
203 dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
205 /* FIXME: move to proper postion or REMOVE */
/* Equal top/bottom field timestamps in shared memory indicate a
 * progressive frame */
206 if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
207 s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
208 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
210 dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
212 ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
213 vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
214 mfc_debug(2, "Cleaned up buffer: %d\n",
215 dst_buf->b->v4l2_buf.index);
217 mfc_debug(2, "After cleanup\n");
/* A new frame is ready for display: locate the vb2 buffer whose DMA
 * cookie matches the luma address the HW reported, fill in its v4l2
 * metadata and hand it back to userspace.  Skipped frames are not
 * dequeued at all. */
220 static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
222 struct s5p_mfc_buf *dst_buf;
223 size_t dspl_y_addr = MFC_GET_ADR(DEC_DISPLAY_Y);
227 /* If frame is same as previous then skip and do not dequeue */
228 if (MFC_GET_REG(DEC_DECODE_FRAME_TYPE) == S5P_FIMV_DECODE_FRAME_SKIPPED)
230 /* The MFC returns address of the buffer, now we have to
231 * check which videobuf does it correspond to */
232 list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
233 mfc_debug(2, "Listing: %d\n", dst_buf->b->v4l2_buf.index);
234 /* Check if this is the buffer we're looking for */
235 mfc_debug(2, "0x%08lx, 0x%08x", mfc_plane_cookie(dst_buf->b, 0),
237 if (mfc_plane_cookie(dst_buf->b, 0) == dspl_y_addr) {
238 list_del(&dst_buf->list);
239 ctx->dst_queue_cnt--;
240 dst_buf->b->v4l2_buf.sequence = ctx->sequence;
/* Equal top/bottom field times => progressive frame */
241 if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
242 s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
243 dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
245 dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
246 vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
247 vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
248 clear_bit(dst_buf->b->v4l2_buf.index, &ctx->dec_dst_flag);
/* Propagate a decode error to userspace via the buffer state */
250 vb2_buffer_done(dst_buf->b,
251 err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
253 index = dst_buf->b->v4l2_buf.index;
254 if (call_cop(ctx, get_buf_ctrls_val, ctx, &ctx->dst_ctrls[index]) < 0)
255 mfc_err("failed in get_buf_ctrls_val\n");
262 /* Handle frame decoding interrupt */
/* Main decode-done path: reads the display status, handles resolution
 * changes and the "decoding empty" drain case, dispatches new display
 * frames, advances (or re-runs) the source bitstream buffer, and
 * finally releases the HW lock / schedules the next run. */
263 static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
264 unsigned int reason, unsigned int err)
266 struct s5p_mfc_dev *dev = ctx->dev;
267 unsigned int dst_frame_status;
268 struct s5p_mfc_buf *src_buf;
270 unsigned int res_change;
273 dst_frame_status = s5p_mfc_get_dspl_status()
274 & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
275 res_change = s5p_mfc_get_dspl_status()
276 & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
277 mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
/* Resolution change is a two-phase protocol: INIT -> FLUSH -> END */
279 if (ctx->state == MFCINST_RES_CHANGE_INIT)
280 ctx->state = MFCINST_RES_CHANGE_FLUSH;
/* NOTE(review): mfc_err used for an informational message -- looks
 * like it should be mfc_debug; confirm intent. */
283 mfc_err("Resolution change set to %d\n", res_change);
284 ctx->state = MFCINST_RES_CHANGE_INIT;
286 s5p_mfc_clear_int_flags();
287 wake_up_ctx(ctx, reason, err);
288 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
291 queue_work(dev->irq_workqueue, &dev->work_struct);
293 s5p_mfc_try_run(dev);
296 if (ctx->dpb_flush_flag)
297 ctx->dpb_flush_flag = 0;
299 spin_lock_irqsave(&dev->irqlock, flags);
300 /* All frames remaining in the buffer have been extracted */
301 if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
302 if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
303 mfc_debug(2, "Last frame received after resolution change.\n");
304 s5p_mfc_handle_frame_all_extracted(ctx);
305 ctx->state = MFCINST_RES_CHANGE_END;
306 goto leave_handle_frame;
308 s5p_mfc_handle_frame_all_extracted(ctx);
312 /* A frame has been decoded and is in the buffer */
313 if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
314 dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
315 s5p_mfc_handle_frame_new(ctx, err);
317 mfc_debug(2, "No frame decode.\n");
319 /* Mark source buffer as complete */
320 if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
321 && !list_empty(&ctx->src_queue)) {
322 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
324 mfc_debug(2, "Packed PB test. Size:%d, prev offset: %ld, this run:"
325 " %d\n", src_buf->b->v4l2_planes[0].bytesused,
326 ctx->consumed_stream, s5p_mfc_get_consumed_stream());
327 ctx->consumed_stream += s5p_mfc_get_consumed_stream();
/* Packed-PB handling (non-H264): if a P-frame left more than
 * STUFF_BYTE of data in the buffer, decode the same buffer again
 * from the consumed offset instead of advancing. */
328 if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
329 s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
330 && ctx->consumed_stream + STUFF_BYTE <
331 src_buf->b->v4l2_planes[0].bytesused) {
332 /* Run MFC again on the same buffer */
333 mfc_debug(2, "Running again the same buffer.\n");
334 s5p_mfc_set_dec_stream_buffer(ctx,
335 src_buf->cookie.stream, ctx->consumed_stream,
336 src_buf->b->v4l2_planes[0].bytesused -
337 ctx->consumed_stream);
338 dev->curr_ctx = ctx->num;
339 s5p_mfc_clean_ctx_int_flags(ctx);
340 spin_unlock_irqrestore(&dev->irqlock, flags);
341 s5p_mfc_clear_int_flags();
342 wake_up_ctx(ctx, reason, err);
343 s5p_mfc_decode_one_frame(ctx, 0);
346 index = src_buf->b->v4l2_buf.index;
347 if (call_cop(ctx, recover_buf_ctrls_val, ctx, &ctx->src_ctrls[index]) < 0)
348 mfc_err("failed in recover_buf_ctrls_val\n");
/* Source buffer fully consumed: return it and reset the offset */
350 mfc_debug(2, "MFC needs next buffer.\n");
351 ctx->consumed_stream = 0;
352 list_del(&src_buf->list);
353 ctx->src_queue_cnt--;
354 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
358 spin_unlock_irqrestore(&dev->irqlock, flags);
359 mfc_debug(2, "Assesing whether this context should be run again.\n");
360 /* if (!s5p_mfc_ctx_ready(ctx)) { */
/* Context is runnable only with pending src data (or finishing) and
 * enough free display buffers (>= dpb_count) */
361 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
362 || ctx->dst_queue_cnt < ctx->dpb_count) {
363 mfc_debug(2, "No need to run again.\n");
366 mfc_debug(2, "After assesing whether this context should be run again.\n");
367 s5p_mfc_clear_int_flags();
368 wake_up_ctx(ctx, reason, err);
369 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
372 queue_work(dev->irq_workqueue, &dev->work_struct);
374 s5p_mfc_try_run(dev);
377 /* Error handling for interrupt */
/* Recover from an error interrupt.  The recovery strategy depends on
 * the context state: command-phase errors (instance open/close, header
 * parse) just wake the waiter; streaming-phase errors fail the context
 * and flush both buffer queues. */
378 static inline void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
379 unsigned int reason, unsigned int err)
382 struct s5p_mfc_dev *dev;
386 /* If no context is available then all necessary
387 * processing has been done. */
393 mfc_err("Interrupt Error: %08x\n", err);
394 s5p_mfc_clear_int_flags();
395 wake_up_dev(dev, reason, err);
397 /* Error recovery is dependent on the state of context */
398 switch (ctx->state) {
400 /* This error had to happen while acquireing instance */
401 case MFCINST_GOT_INST:
402 /* This error had to happen while parsing the header */
403 case MFCINST_HEAD_PARSED:
404 /* This error had to happen while setting dst buffers */
405 case MFCINST_RETURN_INST:
406 /* This error had to happen while releasing instance */
408 wake_up_ctx(ctx, reason, err);
409 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
412 queue_work(dev->irq_workqueue, &dev->work_struct);
415 case MFCINST_FINISHING:
416 case MFCINST_FINISHED:
417 case MFCINST_RUNNING:
418 /* It is higly probable that an error occured
419 * while decoding a frame */
421 ctx->state = MFCINST_ERROR;
422 /* Mark all dst buffers as having an error */
423 spin_lock_irqsave(&dev->irqlock, flags);
424 s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
425 /* Mark all src buffers as having an error */
426 s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
427 spin_unlock_irqrestore(&dev->irqlock, flags);
428 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
431 queue_work(dev->irq_workqueue, &dev->work_struct);
435 mfc_err("Encountered an error interrupt which had not been handled.\n");
441 /* Interrupt processing */
/* Top-level MFC interrupt handler: resets the watchdog, reads the
 * reason/error registers and dispatches per command-return code.
 * FIX(review): the SEQ_DONE sanity check below tested img_width twice
 * instead of width and height; a stream with zero height would have
 * slipped past it.  Corrected to test img_height in the second term. */
442 static irqreturn_t s5p_mfc_irq(int irq, void *priv)
444 struct s5p_mfc_dev *dev = priv;
445 struct s5p_mfc_buf *src_buf;
446 struct s5p_mfc_ctx *ctx;
450 unsigned int guard_width, guard_height;
453 /* Reset the timeout watchdog */
454 atomic_set(&dev->watchdog_cnt, 0);
455 ctx = dev->ctx[dev->curr_ctx];
456 /* Get the reason of interrupt and the error code */
457 reason = s5p_mfc_get_int_reason();
458 err = s5p_mfc_get_int_err();
459 mfc_debug(2, "Int reason: %d (err: %08x)\n", reason, err);
461 case S5P_FIMV_R2H_CMD_ERR_RET:
462 /* An error has occured */
/* Warnings during streaming are treated as a (degraded) frame */
463 if (ctx->state == MFCINST_RUNNING &&
464 err >= S5P_FIMV_ERR_WARNINGS_START)
465 s5p_mfc_handle_frame(ctx, reason, err);
467 s5p_mfc_handle_error(ctx, reason, err);
469 case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
470 case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
471 if (ctx->c_ops->post_frame_start) {
472 if (ctx->c_ops->post_frame_start(ctx))
473 mfc_err("post_frame_start() failed\n");
475 s5p_mfc_clear_int_flags();
476 wake_up_ctx(ctx, reason, err);
477 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
480 queue_work(dev->irq_workqueue, &dev->work_struct);
482 s5p_mfc_try_run(dev);
484 s5p_mfc_handle_frame(ctx, reason, err);
487 case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
488 if (ctx->c_ops->post_seq_start) {
489 if (ctx->c_ops->post_seq_start(ctx))
490 mfc_err("post_seq_start() failed\n");
492 if (ctx->src_fmt->fourcc != V4L2_PIX_FMT_DIVX3) {
493 ctx->img_width = s5p_mfc_get_img_width();
494 ctx->img_height = s5p_mfc_get_img_height();
/* Buffer dimensions are the image dimensions aligned for NV12MT */
497 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
498 ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
499 mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
500 "buffer dimensions: %dx%d\n", ctx->img_width,
501 ctx->img_height, ctx->buf_width, ctx->buf_height);
503 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
504 ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height,
505 S5P_FIMV_DEC_BUF_ALIGN);
506 ctx->chroma_size = ALIGN(ctx->buf_width *
507 ALIGN((ctx->img_height >> 1),
508 S5P_FIMV_NV12MT_VALIGN),
509 S5P_FIMV_DEC_BUF_ALIGN);
510 ctx->mv_size = ALIGN(ctx->buf_width *
511 ALIGN((ctx->buf_height >> 2),
512 S5P_FIMV_NV12MT_VALIGN),
513 S5P_FIMV_DEC_BUF_ALIGN);
/* Non-H264 codecs need guard margins around the image */
515 guard_width = ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN);
516 guard_height = ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN);
517 ctx->luma_size = ALIGN(guard_width * guard_height,
518 S5P_FIMV_DEC_BUF_ALIGN);
520 guard_width = ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN);
521 guard_height = ALIGN((ctx->img_height >> 1) + 4, S5P_FIMV_NV12MT_VALIGN);
522 ctx->chroma_size = ALIGN(guard_width * guard_height,
523 S5P_FIMV_DEC_BUF_ALIGN);
528 ctx->dpb_count = s5p_mfc_get_dpb_count();
/* Reject streams whose parsed header reports a zero dimension
 * (was: img_width tested twice) */
529 if (ctx->img_width == 0 || ctx->img_height == 0)
530 ctx->state = MFCINST_ERROR;
532 ctx->state = MFCINST_HEAD_PARSED;
535 s5p_mfc_clear_int_flags();
537 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
540 queue_work(dev->irq_workqueue, &dev->work_struct);
542 s5p_mfc_try_run(dev);
543 wake_up_ctx(ctx, reason, err);
545 case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
546 ctx->inst_no = s5p_mfc_get_inst_no();
547 ctx->state = MFCINST_GOT_INST;
549 wake_up_interruptible(&ctx->queue);
552 case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
554 ctx->state = MFCINST_FREE;
555 wake_up(&ctx->queue);
558 case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
559 case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
560 case S5P_FIMV_R2H_CMD_SLEEP_RET:
561 case S5P_FIMV_R2H_CMD_WAKEUP_RET:
/* Device-level commands: wake waiters on the device queue */
564 s5p_mfc_clear_int_flags();
565 wake_up_dev(dev, reason, err);
566 clear_bit(0, &dev->hw_lock);
568 case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
569 s5p_mfc_clear_int_flags();
570 ctx->int_type = reason;
573 spin_lock(&dev->condlock);
574 clear_bit(ctx->num, &dev->ctx_work_bits);
575 spin_unlock(&dev->condlock);
577 ctx->state = MFCINST_RUNNING;
/* NOTE(review): debug text mentions dpb_flush but the branch runs
 * when the flag is NOT set -- confirm against the full file. */
578 if (!ctx->dpb_flush_flag) {
579 mfc_debug(2, "INIT_BUFFERS with dpb_flush - leaving image in src queue.\n");
580 spin_lock_irqsave(&dev->irqlock, flags);
581 if (!list_empty(&ctx->src_queue)) {
582 src_buf = list_entry(ctx->src_queue.next,
583 struct s5p_mfc_buf, list);
584 list_del(&src_buf->list);
585 ctx->src_queue_cnt--;
586 vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
588 spin_unlock_irqrestore(&dev->irqlock, flags);
590 ctx->dpb_flush_flag = 0;
592 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
595 queue_work(dev->irq_workqueue, &dev->work_struct);
597 wake_up_interruptible(&ctx->queue);
598 s5p_mfc_try_run(dev);
600 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
603 queue_work(dev->irq_workqueue, &dev->work_struct);
605 wake_up_interruptible(&ctx->queue);
609 mfc_debug(2, "Unknown int reason.\n");
610 s5p_mfc_clear_int_flags();
615 s5p_mfc_clear_int_flags();
616 ctx->int_type = reason;
619 if (test_and_clear_bit(0, &dev->hw_lock) == 0)
620 mfc_err("Failed to unlock hw.\n");
622 queue_work(dev->irq_workqueue, &dev->work_struct);
624 s5p_mfc_try_run(dev);
625 mfc_debug(2, "%s-- (via irq_cleanup_hw)\n", __func__);
629 /* Open an MFC node */
/* File open handler: allocates and registers a new context, selects
 * decoder or encoder ops by node type, and for the first instance
 * starts the watchdog, loads firmware, powers on and initializes the
 * HW.  Both vb2 queues and the context control handlers are set up
 * before returning. */
630 static int s5p_mfc_open(struct file *file)
632 struct s5p_mfc_ctx *ctx = NULL;
633 struct s5p_mfc_dev *dev = video_drvdata(file);
640 dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
642 /* Allocate memory for context */
643 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
645 mfc_err("Not enough memory.\n");
649 file->private_data = ctx;
651 INIT_LIST_HEAD(&ctx->src_queue);
652 INIT_LIST_HEAD(&ctx->dst_queue);
653 ctx->src_queue_cnt = 0;
654 ctx->dst_queue_cnt = 0;
655 /* Get context number */
/* Find the first free slot in dev->ctx[] */
657 while (dev->ctx[ctx->num]) {
659 if (ctx->num >= MFC_NUM_CONTEXTS) {
660 mfc_err("Too many open contexts.\n");
665 /* Mark context as idle */
666 spin_lock_irqsave(&dev->condlock, flags);
667 clear_bit(ctx->num, &dev->ctx_work_bits);
668 spin_unlock_irqrestore(&dev->condlock, flags);
669 dev->ctx[ctx->num] = ctx;
/* Wire decoder/encoder specific ops and default formats */
670 if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
671 ctx->type = MFCINST_DECODER;
672 ctx->c_ops = get_dec_codec_ops();
674 ctx->src_fmt = get_dec_def_fmt(1);
675 ctx->dst_fmt = get_dec_def_fmt(0);
676 } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
677 ctx->type = MFCINST_ENCODER;
678 ctx->c_ops = get_enc_codec_ops();
680 ctx->src_fmt = get_enc_def_fmt(1);
681 ctx->dst_fmt = get_enc_def_fmt(0);
683 /* only for encoder */
684 INIT_LIST_HEAD(&ctx->ref_queue);
685 ctx->ref_queue_cnt = 0;
691 /* Load firmware if this is the first instance */
692 if (dev->num_inst == 1) {
693 dev->watchdog_timer.expires = jiffies +
694 msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
695 add_timer(&dev->watchdog_timer);
698 ret = s5p_mfc_alloc_firmware(dev);
701 ret = s5p_mfc_load_firmware(dev);
705 mfc_debug(2, "power on\n");
706 ret = s5p_mfc_power_on();
708 mfc_err("power on failed\n");
/* Without runtime PM the memory banks must be resumed manually */
711 #ifndef CONFIG_PM_RUNTIME
712 s5p_mfc_mem_resume(dev->alloc_ctx[0]);
713 s5p_mfc_mem_resume(dev->alloc_ctx[1]);
716 ret = s5p_mfc_init_hw(dev);
721 /* Init videobuf2 queue for CAPTURE */
723 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
725 if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
726 q->io_modes = VB2_MMAP;
727 q->ops = get_dec_queue_ops();
729 q->io_modes = VB2_MMAP | VB2_USERPTR;
730 q->ops = get_enc_queue_ops();
733 q->mem_ops = s5p_mfc_mem_ops();
734 ret = vb2_queue_init(q);
736 mfc_err("Failed to initialize videobuf2 queue(capture)\n");
740 /* Init videobuf2 queue for OUTPUT */
742 q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
743 q->io_modes = VB2_MMAP;
745 if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
746 q->io_modes = VB2_MMAP;
747 q->ops = get_dec_queue_ops();
749 q->io_modes = VB2_MMAP | VB2_USERPTR;
750 q->ops = get_enc_queue_ops();
753 q->mem_ops = s5p_mfc_mem_ops();
754 ret = vb2_queue_init(q);
756 mfc_err("Failed to initialize videobuf2 queue(output)\n");
760 if (call_cop(ctx, init_ctx_ctrls, ctx) < 0)
761 mfc_err("failed in init_buf_ctrls\n");
763 init_waitqueue_head(&ctx->queue);
/* NOTE(review): "(via irq_cleanup_hw)" looks copy-pasted from the irq
 * handler's trace message -- misleading in open(); consider fixing. */
764 mfc_debug(2, "%s-- (via irq_cleanup_hw)\n", __func__);
767 /* Deinit when failure occured */
/* Error unwind: tear down power/firmware only if we were the first
 * (and only) instance */
770 if (dev->num_inst == 1) {
771 if (s5p_mfc_power_off() < 0)
772 mfc_err("power off failed\n");
774 s5p_mfc_release_firmware(dev);
779 s5p_mfc_release_firmware(dev);
783 dev->ctx[ctx->num] = 0;
785 del_timer_sync(&dev->watchdog_timer);
795 /* Release MFC context */
/* File release handler: tears down the context's controls and vb2
 * queues, returns the HW instance (waiting for the close command to
 * complete), frees its buffers, and on last close deinitializes the
 * HW, releases firmware and powers the block off. */
796 static int s5p_mfc_release(struct file *file)
798 struct s5p_mfc_ctx *ctx = file->private_data;
799 struct s5p_mfc_dev *dev = ctx->dev;
804 if (call_cop(ctx, cleanup_ctx_ctrls, ctx) < 0)
805 mfc_err("failed in init_buf_ctrls\n");
/* Keep the clock on while the instance is being returned */
807 set_bit(0, &dev->clk_state);
810 vb2_queue_release(&ctx->vq_src);
811 vb2_queue_release(&ctx->vq_dst);
813 /* Mark context as idle */
814 spin_lock_irqsave(&dev->condlock, flags);
815 clear_bit(ctx->num, &dev->ctx_work_bits);
816 spin_unlock_irqrestore(&dev->condlock, flags);
817 /* If instance was initialised then
818 * return instance and free reosurces */
819 if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
820 ctx->state = MFCINST_RETURN_INST;
821 spin_lock_irqsave(&dev->condlock, flags);
822 set_bit(ctx->num, &dev->ctx_work_bits);
823 spin_unlock_irqrestore(&dev->condlock, flags);
824 s5p_mfc_clean_ctx_int_flags(ctx);
825 s5p_mfc_try_run(dev);
826 /* Wait until instance is returned or timeout occured */
827 if (s5p_mfc_wait_for_done_ctx
828 (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
829 mfc_err("Err returning instance.\n");
832 s5p_mfc_release_codec_buffers(ctx);
833 s5p_mfc_release_instance_buffer(ctx);
834 if (ctx->type == MFCINST_DECODER)
835 s5p_mfc_release_dec_desc_buffer(ctx);
839 /* hardware locking scheme */
/* Drop the HW lock if this context still held it */
840 if (dev->curr_ctx == ctx->num)
841 clear_bit(0, &dev->hw_lock);
845 if (dev->num_inst == 0) {
846 /* FIXME: is it need ? */
847 s5p_mfc_deinit_hw(dev);
849 #ifndef CONFIG_PM_RUNTIME
850 s5p_mfc_mem_suspend(dev->alloc_ctx[0]);
851 s5p_mfc_mem_suspend(dev->alloc_ctx[1]);
853 /* reset <-> F/W release */
854 s5p_mfc_release_firmware(dev);
855 del_timer_sync(&dev->watchdog_timer);
857 mfc_debug(2, "power off\n");
858 if (s5p_mfc_power_off() < 0)
859 mfc_err("power off failed\n");
862 clear_bit(0, &dev->clk_state);
864 dev->ctx[ctx->num] = 0;
/* poll() handler: reports POLLOUT when a source (OUTPUT) buffer is
 * done and POLLIN when a destination (CAPTURE) buffer is done.
 * Requires at least one buffer queued on a streaming queue, otherwise
 * the poll is rejected. */
873 static unsigned int s5p_mfc_poll(struct file *file,
874 struct poll_table_struct *wait)
876 struct s5p_mfc_ctx *ctx = file->private_data;
877 struct s5p_mfc_dev *dev = ctx->dev;
878 struct vb2_queue *src_q, *dst_q;
879 struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
883 src_q = &ctx->vq_src;
884 dst_q = &ctx->vq_dst;
887 * There has to be at least one buffer queued on each queued_list, which
888 * means either in driver already or waiting for driver to claim it
889 * and start processing.
891 if ((!src_q->streaming || list_empty(&src_q->queued_list))
892 && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
/* Drop the mutex while registering wait queues so other file ops
 * are not blocked for the duration of the poll */
897 mutex_unlock(&dev->mfc_mutex);
899 poll_wait(file, &src_q->done_wq, wait);
900 poll_wait(file, &dst_q->done_wq, wait);
902 mutex_lock(&dev->mfc_mutex);
904 spin_lock_irqsave(&src_q->done_lock, flags);
905 if (!list_empty(&src_q->done_list))
906 src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
908 if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
909 || src_vb->state == VB2_BUF_STATE_ERROR))
910 rc |= POLLOUT | POLLWRNORM;
912 spin_unlock_irqrestore(&src_q->done_lock, flags);
914 spin_lock_irqsave(&dst_q->done_lock, flags);
916 if (!list_empty(&dst_q->done_list))
917 dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
920 if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
921 || dst_vb->state == VB2_BUF_STATE_ERROR))
922 rc |= POLLIN | POLLRDNORM;
924 spin_unlock_irqrestore(&dst_q->done_lock, flags);
/* mmap() handler: a single offset space is shared by both queues --
 * offsets below DST_QUEUE_OFF_BASE map the source (OUTPUT) queue,
 * offsets at or above it map the destination (CAPTURE) queue after
 * rebasing the page offset. */
930 static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
932 struct s5p_mfc_ctx *ctx = file->private_data;
933 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
937 if (offset < DST_QUEUE_OFF_BASE) {
938 mfc_debug(2, "mmaping source.\n");
939 ret = vb2_mmap(&ctx->vq_src, vma);
940 } else { /* capture */
941 mfc_debug(2, "mmaping destination.\n");
942 vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
943 ret = vb2_mmap(&ctx->vq_dst, vma);
/* File operations shared by the decoder and encoder video nodes */
950 static const struct v4l2_file_operations s5p_mfc_fops = {
951 .owner = THIS_MODULE,
952 .open = s5p_mfc_open,
953 .release = s5p_mfc_release,
954 .poll = s5p_mfc_poll,
955 .unlocked_ioctl = video_ioctl2,
956 .mmap = s5p_mfc_mmap,
959 /* videodec structure */
/* Template video_device for the decoder node; copied into a freshly
 * allocated video_device in probe before registration. */
960 static struct video_device s5p_mfc_dec_videodev = {
961 .name = S5P_MFC_DEC_NAME,
962 .fops = &s5p_mfc_fops,
964 .ioctl_ops = &s5p_mfc_ioctl_ops,
967 .release = video_device_release,
/* Template video_device for the encoder node */
970 static struct video_device s5p_mfc_enc_videodev = {
971 .name = S5P_MFC_ENC_NAME,
972 .fops = &s5p_mfc_fops,
974 .ioctl_ops = &s5p_mfc_enc_ioctl_ops,
977 .release = video_device_release,
980 /* MFC probe function */
/* Platform probe: allocates the device structure, sets up PM/clocks,
 * the IRQ workqueue, memory and IRQ resources, registers the v4l2
 * device plus decoder and encoder video nodes, and finally the
 * watchdog machinery and vb2 allocator contexts.  Unwinds in reverse
 * order on failure. */
981 static int __devinit s5p_mfc_probe(struct platform_device *pdev)
983 struct s5p_mfc_dev *dev;
984 struct video_device *vfd;
985 struct resource *res;
988 char workqueue_name[MFC_WORKQUEUE_LEN];
990 pr_debug("%s++\n", __func__);
991 dev = kzalloc(sizeof *dev, GFP_KERNEL);
993 dev_err(&pdev->dev, "Not enough memory for MFC device.\n");
997 spin_lock_init(&dev->irqlock);
998 spin_lock_init(&dev->condlock);
999 dev_dbg(&pdev->dev, "Initialised spin lock\n");
1000 dev->plat_dev = pdev;
1001 if (!dev->plat_dev) {
1002 dev_err(&pdev->dev, "No platform data specified\n");
1007 dev_dbg(&pdev->dev, "Getting clocks\n");
1008 ret = s5p_mfc_init_pm(dev);
1010 dev_err(&pdev->dev, "failed to get mfc clock source\n");
1014 sprintf(workqueue_name, "mfc_workqueue");
1015 dev->irq_workqueue = create_workqueue(workqueue_name);
1016 if (dev->irq_workqueue == NULL) {
1017 dev_err(&pdev->dev, "failed to create workqueue for mfc\n");
/* Deferred clock-off runs from this work item (see
 * mfc_workqueue_clock_off) */
1020 INIT_WORK(&dev->work_struct, mfc_workqueue_clock_off);
1021 clear_bit(0, &dev->clk_state);
1023 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1025 dev_err(&pdev->dev, "failed to get memory region resource.\n");
1029 size = (res->end - res->start) + 1;
1030 dev->mfc_mem = request_mem_region(res->start, size, pdev->name);
1031 if (dev->mfc_mem == NULL) {
1032 dev_err(&pdev->dev, "failed to get memory region.\n");
1036 dev->regs_base = ioremap(dev->mfc_mem->start,
1037 dev->mfc_mem->end - dev->mfc_mem->start + 1);
1038 if (dev->regs_base == NULL) {
1039 dev_err(&pdev->dev, "failed to ioremap address region.\n");
1044 s5p_mfc_init_reg(dev->regs_base);
1046 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1048 dev_err(&pdev->dev, "failed to get irq resource.\n");
1052 dev->irq = res->start;
1053 ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
1056 dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1060 mutex_init(&dev->mfc_mutex);
1062 ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1065 init_waitqueue_head(&dev->queue);
/* Decoder node: clone the template and register it */
1068 vfd = video_device_alloc();
1070 v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1074 *vfd = s5p_mfc_dec_videodev;
1076 vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
1078 vfd->lock = &dev->mfc_mutex;
1079 vfd->v4l2_dev = &dev->v4l2_dev;
1080 snprintf(vfd->name, sizeof(vfd->name), "%s", s5p_mfc_dec_videodev.name);
1082 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1084 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1085 video_device_release(vfd);
1088 v4l2_info(&dev->v4l2_dev, "decoder registered as /dev/video%d\n",
1092 video_set_drvdata(vfd, dev);
/* Encoder node: same procedure with the encoder template */
1095 vfd = video_device_alloc();
1097 v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1099 goto unreg_vdev_dec;
1101 *vfd = s5p_mfc_enc_videodev;
1103 vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
1105 vfd->lock = &dev->mfc_mutex;
1106 vfd->v4l2_dev = &dev->v4l2_dev;
1107 snprintf(vfd->name, sizeof(vfd->name), "%s", s5p_mfc_enc_videodev.name);
1109 ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1111 v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1112 video_device_release(vfd);
1115 v4l2_info(&dev->v4l2_dev, "encoder registered as /dev/video%d\n",
1119 video_set_drvdata(vfd, dev);
1121 platform_set_drvdata(pdev, dev);
/* Watchdog: single-threaded workqueue + timer armed on first open */
1124 dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
1125 INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
1126 atomic_set(&dev->watchdog_cnt, 0);
1127 init_timer(&dev->watchdog_timer);
1128 dev->watchdog_timer.data = (unsigned long)dev;
1129 dev->watchdog_timer.function = s5p_mfc_watchdog;
1131 dev->alloc_ctx = (struct vb2_alloc_ctx **)
1132 s5p_mfc_mem_init_multi(&pdev->dev);
1134 if (IS_ERR(dev->alloc_ctx)) {
1135 mfc_err("Couldn't prepare allocator ctx.\n");
1136 ret = PTR_ERR(dev->alloc_ctx);
1137 goto alloc_ctx_fail;
1140 pr_debug("%s--\n", __func__);
1143 /* Deinit MFC if probe had failed */
1145 video_unregister_device(dev->vfd_enc);
1147 video_device_release(dev->vfd_enc);
1149 video_unregister_device(dev->vfd_dec);
1151 video_device_release(dev->vfd_dec);
1153 v4l2_device_unregister(&dev->v4l2_dev);
1155 free_irq(dev->irq, dev);
1158 iounmap(dev->regs_base);
1159 dev->regs_base = NULL;
1161 release_resource(dev->mfc_mem);
1162 kfree(dev->mfc_mem);
1165 s5p_mfc_final_pm(dev);
1170 pr_debug("%s-- with error\n", __func__);
1174 /* Remove the driver */
/* Platform remove: stops the watchdog, unregisters both video nodes
 * and the v4l2 device, frees allocator contexts, deinitializes the
 * HW and releases IRQ/memory/PM resources -- reverse of probe. */
1175 static int __devexit s5p_mfc_remove(struct platform_device *pdev)
1177 struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
1179 dev_dbg(&pdev->dev, "%s++\n", __func__);
1180 v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
/* Timer must be dead before its workqueue is flushed/destroyed */
1181 del_timer_sync(&dev->watchdog_timer);
1182 flush_workqueue(dev->watchdog_workqueue);
1183 destroy_workqueue(dev->watchdog_workqueue);
1184 video_unregister_device(dev->vfd_enc);
1185 video_unregister_device(dev->vfd_dec);
1186 v4l2_device_unregister(&dev->v4l2_dev);
1187 s5p_mfc_mem_cleanup_multi((void **)dev->alloc_ctx);
1188 mfc_debug(2, "Will now deinit HW\n");
1189 s5p_mfc_deinit_hw(dev);
1190 free_irq(dev->irq, dev);
1191 iounmap(dev->regs_base);
1192 if (dev->mfc_mem != NULL) {
1193 release_resource(dev->mfc_mem);
1194 kfree(dev->mfc_mem);
1195 dev->mfc_mem = NULL;
1197 s5p_mfc_final_pm(dev);
1199 dev_dbg(&pdev->dev, "%s--\n", __func__);
/* System suspend: no-op with no open instances; otherwise put the
 * firmware to sleep and suspend both vb2 allocator memory banks. */
1204 static int s5p_mfc_suspend(struct device *dev)
1206 struct s5p_mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
1209 if (m_dev->num_inst == 0)
1212 /* FIXME: how about locking ? */
1213 ret = s5p_mfc_sleep(m_dev);
1215 s5p_mfc_mem_suspend(m_dev->alloc_ctx[0]);
1216 s5p_mfc_mem_suspend(m_dev->alloc_ctx[1]);
/* System resume: mirror of s5p_mfc_suspend -- resume the memory banks
 * first, then wake the firmware. */
1221 static int s5p_mfc_resume(struct device *dev)
1223 struct s5p_mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
1226 if (m_dev->num_inst == 0)
1229 s5p_mfc_mem_resume(m_dev->alloc_ctx[0]);
1230 s5p_mfc_mem_resume(m_dev->alloc_ctx[1]);
1232 /* FIXME: how about locking ? */
1233 ret = s5p_mfc_wakeup(m_dev);
1237 #ifdef CONFIG_PM_RUNTIME
/* Runtime suspend: track power state in pm.power and suspend the
 * allocator banks only on a real 1 -> 0 transition. */
1238 static int s5p_mfc_runtime_suspend(struct device *dev)
1240 struct s5p_mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
1243 pre_power = atomic_read(&m_dev->pm.power);
1244 atomic_set(&m_dev->pm.power, 0);
1246 if (pre_power == 1) {
1247 s5p_mfc_mem_suspend(m_dev->alloc_ctx[0]);
1248 s5p_mfc_mem_suspend(m_dev->alloc_ctx[1]);
/* Runtime idle callback (body elided in this view) */
1254 static int s5p_mfc_runtime_idle(struct device *dev)
/* Runtime resume: symmetric to runtime_suspend; skips everything
 * until the allocator contexts exist (i.e. before probe finishes). */
1259 static int s5p_mfc_runtime_resume(struct device *dev)
1261 struct s5p_mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
1265 if (!m_dev->alloc_ctx)
1268 pre_power = atomic_read(&m_dev->pm.power);
1269 atomic_set(&m_dev->pm.power, 1);
1271 if (pre_power == 0) {
1272 s5p_mfc_mem_resume(m_dev->alloc_ctx[0]);
1273 s5p_mfc_mem_resume(m_dev->alloc_ctx[1]);
/* Stubs used when the corresponding PM support is compiled out.
 * NOTE(review): the runtime stubs are named mfc_runtime_* while the
 * pm_ops entries reference s5p_mfc_runtime_* -- these defines appear
 * dead/mismatched; confirm against the full preprocessor context. */
1281 #define s5p_mfc_suspend NULL
1282 #define s5p_mfc_resume NULL
1283 #ifdef CONFIG_PM_RUNTIME
1284 #define mfc_runtime_idle NULL
1285 #define mfc_runtime_suspend NULL
1286 #define mfc_runtime_resume NULL
1290 /* Power management */
/* PM callbacks: system sleep always, runtime PM only when enabled */
1291 static const struct dev_pm_ops s5p_mfc_pm_ops = {
1292 .suspend = s5p_mfc_suspend,
1293 .resume = s5p_mfc_resume,
1294 #ifdef CONFIG_PM_RUNTIME
1295 .runtime_idle = s5p_mfc_runtime_idle,
1296 .runtime_suspend = s5p_mfc_runtime_suspend,
1297 .runtime_resume = s5p_mfc_runtime_resume,
/* Platform driver binding for the "s5p-mfc" platform device */
1301 static struct platform_driver s5p_mfc_p_driver = {
1302 .probe = s5p_mfc_probe,
1303 .remove = __devexit_p(s5p_mfc_remove),
1305 .name = S5P_MFC_NAME,
1306 .owner = THIS_MODULE,
1307 .pm = &s5p_mfc_pm_ops
1311 static char banner[] __initdata =
1312 "S5P MFC V4L2 Driver, (c) 2010 Samsung Electronics\n";
/* Module entry: print the banner and register the platform driver */
1314 static int __init s5p_mfc_init(void)
1316 pr_info("%s", banner);
1317 if (platform_driver_register(&s5p_mfc_p_driver) != 0) {
1318 pr_err("Platform device registration failed..\n");
/* Module exit: unregister the platform driver */
1324 static void __exit s5p_mfc_exit(void)
1326 platform_driver_unregister(&s5p_mfc_p_driver);
1329 module_init(s5p_mfc_init);
1330 module_exit(s5p_mfc_exit);
1332 MODULE_LICENSE("GPL");
1333 MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");