[TTVD] Use hardware buffers to return captured frame 17/314817/1
authorJakub Gajownik <j.gajownik2@samsung.com>
Sat, 23 Mar 2024 16:15:32 +0000 (17:15 +0100)
committerj.gajownik2 <j.gajownik2@samsung.com>
Fri, 19 Jul 2024 15:10:02 +0000 (17:10 +0200)
Before this change, all captured video frames were
immediately copied to a temporary buffer using the graphic
accelerator (GA), then mapped, copied to a normal buffer
and returned.
This approach is slow on some boards (most notably on
KantSU2e), especially copying mapped data.

This change makes use of GA-accessible buffers to
return captured frames. With the recent development of
software rendering and the encoder's capability of accessing
data without mapping, we can completely skip this
problematic part of the pipeline.

Bug: https://jira-eu.sec.samsung.net/browse/VDGAME-532
Change-Id: I5a5d7720f4893537a2d9214508837060388651f3
Signed-off-by: Jakub Gajownik <j.gajownik2@samsung.com>
media/capture/video/tizen/gpu_memory_buffer_tracker_tizen.cc
media/capture/video/tizen/video_capture_device_tizen_tv.cc

index aaac74a5d52d9aa13519d42e0324d780c6b36e4b..940c8be0e59201670f29495ce9d0445a5e36f472 100644 (file)
@@ -53,7 +53,7 @@ bool GpuMemoryBufferTrackerTizen::Init(const gfx::Size& dimensions,
   // structure. Instead of holding NV12 data in 2 separate buffers, we'll
   // use single one. It means we need only single fd for all the planes used.
   auto buffer =
-      gfx::TizenGpuBuffer::Allocate(total_space_needed, false /* scanout */);
+      gfx::TizenGpuBuffer::Allocate(total_space_needed, true /* scanout */);
   int offset = 0;
   for (size_t i = 0; i < VideoFrameLayout::NumPlanes(format); ++i) {
     const auto plane_size = media::VideoFrame::PlaneSize(format, i, dimensions);
index 4d178d33bf5fa34f3517c47372d93f1dbd4e10ae..0e5fa9cad1325681beb92b23e37a739dbb98cb92 100644 (file)
@@ -16,6 +16,7 @@
 #include "base/synchronization/waitable_event.h"
 #include "base/task/sequenced_task_runner.h"
 #include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
 #include "build/tizen_version.h"
 #include "gpu/ipc/common/gpu_memory_buffer_support.h"
 #include "media/base/bind_to_current_loop.h"
@@ -221,8 +222,6 @@ class VideoCaptureDeviceTizenTv::Impl {
   // The timestamp of the first frame received from platform API.
   absl::optional<base::TimeDelta> first_frame_timestamp_;
 
-  HWBuffersArray hw_buffers_;
-
   base::WeakPtrFactory<Impl> weak_factory_{this};
   base::WeakPtr<Impl> weak_self_;
 };
@@ -630,98 +629,74 @@ void VideoCaptureDeviceTizenTv::Impl::OnDecodedFrame(RawFrame frame) {
     return;
   }
 
-  auto gpu_memory_buffer =
-      gpu_memory_buffer_support_.CreateGpuMemoryBufferImplFromHandle(
-          buffer.handle_provider->GetGpuMemoryBufferHandle(), frame.image_size,
-          gfx::BufferFormat::YUV_420_BIPLANAR,
-          gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE, base::DoNothing());
-  if (!gpu_memory_buffer) {
-    TIZEN_MEDIA_LOG(ERROR) << "Invalid gpu memory buffer, drop";
-    client_->OnFrameDropped(
-        VideoCaptureFrameDropReason::kBufferPoolBufferAllocationFailed);
-    return;
-  }
-
-  // Allocate HW buffers if needed
-  for (size_t i = 0; i < hw_buffers_.size(); ++i) {
-    const auto plane_size = static_cast<size_t>(
-        VideoFrame::PlaneSize(pixel_format, i, frame.image_size).GetArea());
+  constexpr const int kNoSrcOffset = 0;
 
-    if (hw_buffers_[i] && hw_buffers_[i]->Size() == plane_size)
-      continue;
-
-    // Ensure that there is no time when more than two instances of hw buffers
-    // are created.
-    hw_buffers_[i].reset();
-
-    hw_buffers_[i] =
-        gfx::TizenGpuBuffer::Allocate(plane_size, true /* scanout */);
-    if (!hw_buffers_[i]) {
-      TIZEN_MEDIA_LOG(ERROR) << "Cannot allocate HW buffer: " << i << " , drop";
+  {
+    TRACE_EVENT0("gpu", "Camera.CopyDecodedPlaneY");
+    auto hw_y_buffer = gfx::TizenGpuBuffer::ImportFromFd(
+        buffer.handle_provider->GetGpuMemoryBufferHandle()
+            .native_pixmap_handle.planes[0]
+            .fd);
+    if (!hw_y_buffer) {
+      TIZEN_MEDIA_LOG(ERROR) << "Cannot fetch HW buffer";
       client_->OnFrameDropped(
           VideoCaptureFrameDropReason::kBufferPoolBufferAllocationFailed);
       return;
     }
-  }
-
-  params_.requested_format.pixel_format = pixel_format;
 
-  switch (frame.pixel_format) {
-    case MediaVideoPixelFormat::kPixelFormatUnknown:
-    case MediaVideoPixelFormat::kPixelFormatTiled:
-      // Already handled.
-      break;
-    case MediaVideoPixelFormat::kPixelFormatNV12:
-      hw_buffers_[VideoFrame::kUVPlane]->CopyFrom(
-          frame.nv12_data.uv_phys_data, frame.nv12_data.uv_stride,
-          frame.image_size.width(), frame.image_size.width(),
-          frame.image_size.height() / 2);
-      break;
-    case MediaVideoPixelFormat::kPixelFormatNV16:
-      hw_buffers_[VideoFrame::kUVPlane]->Scale(
-          frame.nv12_data.uv_phys_data, frame.nv12_data.uv_stride,
-          frame.image_size.width(), frame.image_size.width(),
-          frame.image_size.height(), frame.image_size.height() / 2);
-      break;
+    hw_y_buffer->CopyFrom(frame.nv12_data.y_phys_data, frame.nv12_data.y_stride,
+                          frame.image_size.width(), frame.image_size.width(),
+                          frame.image_size.height(), kNoSrcOffset,
+                          buffer.handle_provider->GetGpuMemoryBufferHandle()
+                              .native_pixmap_handle.planes[0]
+                              .offset);
   }
 
-  hw_buffers_[VideoFrame::kYPlane]->CopyFrom(
-      frame.nv12_data.y_phys_data, frame.nv12_data.y_stride,
-      frame.image_size.width(), frame.image_size.width(),
-      frame.image_size.height());
-
-  std::array<base::ScopedClosureRunner, kNumNV12Planes> hw_buffers_unmappers;
-
-  for (const auto& plane :
-       {media::VideoFrame::kYPlane, media::VideoFrame::kUVPlane}) {
-    const auto& hw_buffer = hw_buffers_.at(plane);
-    if (!hw_buffer->Map(gfx::TizenGpuBuffer::AccessMode::kReadOnly)) {
-      TIZEN_MEDIA_LOG_NO_INSTANCE(ERROR)
-          << "Cannot map gpu memory buffer, drop";
+  {
+    TRACE_EVENT0("gpu", "Camera.CopyDecodedPlaneUV");
+    auto hw_uv_buffer = gfx::TizenGpuBuffer::ImportFromFd(
+        buffer.handle_provider->GetGpuMemoryBufferHandle()
+            .native_pixmap_handle.planes[1]
+            .fd);
+    if (!hw_uv_buffer) {
+      TIZEN_MEDIA_LOG(ERROR) << "Cannot fetch HW buffer";
       client_->OnFrameDropped(
           VideoCaptureFrameDropReason::kBufferPoolBufferAllocationFailed);
       return;
     }
 
-    hw_buffers_unmappers[plane] = base::ScopedClosureRunner{base::BindOnce(
-        [](gfx::TizenGpuBuffer* hw_buffer) { hw_buffer->Unmap(); },
-        base::Unretained(hw_buffer.get()))};
+    switch (frame.pixel_format) {
+      case MediaVideoPixelFormat::kPixelFormatUnknown:
+      case MediaVideoPixelFormat::kPixelFormatTiled:
+        // Already handled.
+        break;
+      case MediaVideoPixelFormat::kPixelFormatNV12:
+        hw_uv_buffer->CopyFrom(
+            frame.nv12_data.uv_phys_data, frame.nv12_data.uv_stride,
+            frame.image_size.width(), frame.image_size.width(),
+            frame.image_size.height() / 2, kNoSrcOffset,
+            buffer.handle_provider->GetGpuMemoryBufferHandle()
+                .native_pixmap_handle.planes[1]
+                .offset);
+        break;
+      case MediaVideoPixelFormat::kPixelFormatNV16:
+        hw_uv_buffer->Scale(frame.nv12_data.uv_phys_data,
+                            frame.nv12_data.uv_stride, frame.image_size.width(),
+                            frame.image_size.width(), frame.image_size.height(),
+                            frame.image_size.height() / 2, kNoSrcOffset,
+                            buffer.handle_provider->GetGpuMemoryBufferHandle()
+                                .native_pixmap_handle.planes[1]
+                                .offset);
+        break;
+    }
   }
 
-  NV12Data source_data;
-  source_data.y_data = hw_buffers_[VideoFrame::kYPlane]->Memory();
-  source_data.y_stride = VideoFrame::RowBytes(
-      VideoFrame::kYPlane, PIXEL_FORMAT_NV12, frame.image_size.width());
-  source_data.uv_data = hw_buffers_[VideoFrame::kUVPlane]->Memory();
-  source_data.uv_stride = VideoFrame::RowBytes(
-      VideoFrame::kUVPlane, PIXEL_FORMAT_NV12, frame.image_size.width());
-
-  if (!CopyFrameDataParallel(std::move(source_data), gpu_memory_buffer.get(),
-                             frame.image_size)) {
-    TIZEN_MEDIA_LOG_NO_INSTANCE(ERROR) << "Cannot copy frame, drop";
-    client_->OnFrameDropped(
-        VideoCaptureFrameDropReason::kBufferPoolBufferAllocationFailed);
-    return;
+  params_.requested_format.pixel_format = pixel_format;
+  if (params_.requested_format.frame_size != frame.image_size) {
+    TIZEN_MEDIA_LOG(INFO) << "Captured different frame size: "
+                          << frame.image_size.ToString() << " than requested: "
+                          << params_.requested_format.frame_size.ToString();
+    params_.requested_format.frame_size = frame.image_size;
   }
 
   VideoFrameMetadata metadata;