marucodec: Changed the way a parameter is passed to the qemu_avpicture_fill function.
authorKitae Kim <kt920.kim@samsung.com>
Thu, 3 Jan 2013 01:35:37 +0000 (10:35 +0900)
committerKitae Kim <kt920.kim@samsung.com>
Mon, 7 Jan 2013 08:12:21 +0000 (17:12 +0900)
The main purpose of qemu_avpicture_fill is to calculate the size of an encoded or decoded video frame.
In the case of decoding, the output buffer cannot be allocated before the picture size has been calculated.
If the buffer had to be allocated beforehand, the caller would need to call this function twice or duplicate its implementation.
In addition, except for decoding, the ptr parameter of this function is only needed to assign a memory address to the destination picture.
So I think it is better to use a single pointer here rather than a double pointer.
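
In short, after this change the call sites look roughly like this (a simplified sketch of the two paths touched in the diff below):

    /* Encoding: the input buffer already exists, so it is passed in directly. */
    ret = qemu_avpicture_fill((AVPicture *)pict, inputBuf, avctx->pix_fmt,
                              avctx->width, avctx->height, true);

    /* Decoding: pass NULL; qemu_avpicture_fill allocates the buffer once the
     * picture size is known, and the caller retrieves it via dst.data[0]. */
    numBytes = qemu_avpicture_fill(&dst, NULL, avctx->pix_fmt,
                                   avctx->width, avctx->height, false);
    buffer = dst.data[0];
    /* ... copy the frame out, then release it ... */
    av_free(buffer);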

Signed-off-by: Kitae Kim <kt920.kim@samsung.com>
tizen/src/hw/maru_codec.c

index b9595e7d802b65ce19776a8d8e8c7e07020e04ff..3860314f3ac219feed1b1cd1eb80d8f25ab5a00a 100644 (file)
@@ -401,7 +401,7 @@ static void qemu_init_pix_fmt_info(void)
     pix_fmt_info[PIX_FMT_YUV411P].y_chroma_shift = 0;
 }
 
-static int qemu_avpicture_fill(AVPicture *picture, uint8_t **ptr,
+static int qemu_avpicture_fill(AVPicture *picture, uint8_t *ptr,
                                 int pix_fmt, int width,
                                 int height, bool encode)
 {
@@ -428,14 +428,18 @@ static int qemu_avpicture_fill(AVPicture *picture, uint8_t **ptr,
         fsize = size + 2 * size2;
         TRACE("stride: %d, stride2: %d, size: %d, size2: %d, fsize: %d\n",
             stride, stride2, size, size2, fsize);
-        if (!encode) {
-            *ptr = av_mallocz(fsize);
+        if (!encode && !ptr) {
+            TRACE("allocate a buffer for a decoded picture.\n");
+            ptr = av_mallocz(fsize);
             if (!ptr) {
                 ERR("failed to allocate memory.\n");
                 return -1;
             }
+        } else {
+            TRACE("calculate encoded picture.\n");
         }
-        picture->data[0] = *ptr;
+
+        picture->data[0] = ptr;
         picture->data[1] = picture->data[0] + size;
         picture->data[2] = picture->data[1] + size2;
         picture->data[3] = NULL;
@@ -921,12 +925,8 @@ int qemu_avcodec_encode_video(SVCodecState *s, int ctx_index)
             ERR("[%s] failed to get input buffer\n", __func__);
             return ret;
         }
-#if 0
-        ret = avpicture_fill((AVPicture *)pict, inputBuf, avctx->pix_fmt,
-                            avctx->width, avctx->height);
-#endif
 
-        ret = qemu_avpicture_fill((AVPicture *)pict, &inputBuf, avctx->pix_fmt,
+        ret = qemu_avpicture_fill((AVPicture *)pict, inputBuf, avctx->pix_fmt,
                             avctx->width, avctx->height, true);
 
         if (ret < 0) {
@@ -1100,7 +1100,7 @@ void qemu_av_picture_copy(SVCodecState *s, int ctx_index)
 
     offset = s->codec_param.mmap_offset;
 
-    numBytes = qemu_avpicture_fill(&dst, &buffer, avctx->pix_fmt,
+    numBytes = qemu_avpicture_fill(&dst, NULL, avctx->pix_fmt,
                                   avctx->width, avctx->height, false);
     TRACE("after avpicture_fill: %d\n", numBytes);
     if (numBytes < 0) {
@@ -1110,11 +1110,12 @@ void qemu_av_picture_copy(SVCodecState *s, int ctx_index)
     }
 
     av_picture_copy(&dst, src, avctx->pix_fmt, avctx->width, avctx->height);
-    memcpy((uint8_t *)s->vaddr + offset, dst.data[0], numBytes);
+    buffer = dst.data[0];
+    memcpy((uint8_t *)s->vaddr + offset, buffer, numBytes);
     TRACE("after copy image buffer from host to guest.\n");
 
     if (buffer) {
-        TRACE("release allocated picture.\n");
+        TRACE("release allocated video frame.\n");
         av_free(buffer);
     }